This commit is contained in:
√(noham)²
2024-07-26 16:00:08 +02:00
parent 90e2dadf18
commit c9c3228b2c
7501 changed files with 3266200 additions and 1 deletions

View File

@@ -0,0 +1,315 @@
# Register the location of the calibration pattern tools for the Python tests.
# The path is recorded relative to the OpenCV build tree in
# opencv_apps_python_tests.cfg (one path per line). Skipped when
# cross-compiling, since the config is only consumed on the build host.
if(NOT CMAKE_CROSSCOMPILING)
  # BUGFIX: the "\n" used to be embedded inside the path argument of
  # file(RELATIVE_PATH), corrupting the computed path; it belongs at the end
  # of the appended record so each entry occupies its own line.
  file(RELATIVE_PATH __loc_relative "${OpenCV_BINARY_DIR}" "${CMAKE_CURRENT_LIST_DIR}/pattern_tools")
  file(APPEND "${OpenCV_BINARY_DIR}/opencv_apps_python_tests.cfg" "${__loc_relative}\n")
endif()
# Everything below only builds documentation; leave this script early
# when the BUILD_DOCS option is disabled.
if(NOT BUILD_DOCS)
return()
endif()
# Dependencies scheme (* - optional):
#
# javadoc* -> doxygen_javadoc* -> doxygen_cpp ---------> doxygen -> opencv_docs
# \ \ / /
# \ -> doxygen_python* -> /
# ---------------------------------------------------------->
find_package(Doxygen)
if(DOXYGEN_FOUND)
# Warn (but do not fail) when the locally installed doxygen differs from the
# version the documentation is tested against.
if (DOXYGEN_VERSION VERSION_LESS 1.9.8)
message(WARNING "Found doxygen ${DOXYGEN_VERSION}, version 1.9.8 is used for testing, there is "
"a chance your documentation will look different or have some limitations.")
endif()
# Umbrella target; doxygen_cpp / doxygen_python / doxygen_javadoc are attached
# to it below (see the dependency scheme comment at the top of this file).
add_custom_target(doxygen)
# not documented modules list
set(blacklist "${DOXYGEN_BLACKLIST}")
list(APPEND blacklist "ts" "world")
# Reset tutorial root markers; they are (re)defined while scanning modules.
unset(CMAKE_DOXYGEN_TUTORIAL_CONTRIB_ROOT)
unset(CMAKE_DOXYGEN_TUTORIAL_JS_ROOT)
# CUDA docs are included only when the cudev module is part of the build.
set(OPENCV_DOCS_EXCLUDE_CUDA ON)
if(";${OPENCV_MODULES_EXTRA};" MATCHES ";cudev;")
set(OPENCV_DOCS_EXCLUDE_CUDA OFF)
list(APPEND CMAKE_DOXYGEN_ENABLED_SECTIONS "CUDA_MODULES")
endif()
# gathering headers
set(paths_include)
set(paths_doc)
set(paths_bib)
set(paths_sample)
set(paths_tutorial)
set(paths_hal_interface)
set(refs_main)
set(refs_extra)
set(deps)
# Walk every main/extra module and collect its include/doc/samples/tutorials
# folders plus BibTeX files; build the module reference lists as we go.
foreach(m ${OPENCV_MODULES_MAIN} ${OPENCV_MODULES_EXTRA})
set(the_module "${m}")
if(NOT the_module MATCHES "^opencv_")
set(the_module "opencv_${m}")
endif()
# _pos == -1 means "not blacklisted"; modules with neither include/ nor doc/
# are force-blacklisted with the sentinel -2.
list(FIND blacklist ${m} _pos)
if(NOT EXISTS "${OPENCV_MODULE_${the_module}_LOCATION}/include"
AND NOT EXISTS "${OPENCV_MODULE_${the_module}_LOCATION}/doc"
)
set(_pos -2) # blacklist
endif()
if(${_pos} EQUAL -1)
list(APPEND CMAKE_DOXYGEN_ENABLED_SECTIONS "HAVE_opencv_${m}")
# include folder
set(header_dir "${OPENCV_MODULE_opencv_${m}_LOCATION}/include")
if(EXISTS "${header_dir}")
list(APPEND paths_include "${header_dir}")
list(APPEND deps ${header_dir})
# When CUDA docs are excluded, filter out cuda subfolders and *cuda*.hpp
# headers from the doxygen input.
if(OPENCV_DOCS_EXCLUDE_CUDA)
if(EXISTS "${OPENCV_MODULE_opencv_${m}_LOCATION}/include/opencv2/${m}/cuda")
list(APPEND CMAKE_DOXYGEN_EXCLUDE_LIST "${OPENCV_MODULE_opencv_${m}_LOCATION}/include/opencv2/${m}/cuda")
endif()
file(GLOB list_cuda_files "${OPENCV_MODULE_opencv_${m}_LOCATION}/include/opencv2/${m}/*cuda*.hpp")
if(list_cuda_files)
list(APPEND CMAKE_DOXYGEN_EXCLUDE_LIST ${list_cuda_files})
endif()
endif()
endif()
# doc folder
set(docs_dir "${OPENCV_MODULE_opencv_${m}_LOCATION}/doc")
if(EXISTS "${docs_dir}")
list(APPEND paths_doc "${docs_dir}")
list(APPEND deps ${docs_dir})
endif()
# sample folder
set(sample_dir "${OPENCV_MODULE_opencv_${m}_LOCATION}/samples")
if(EXISTS "${sample_dir}")
list(APPEND paths_sample "${sample_dir}")
list(APPEND deps ${sample_dir})
endif()
# tutorial folder
set(tutorial_dir "${OPENCV_MODULE_opencv_${m}_LOCATION}/tutorials")
if(EXISTS "${tutorial_dir}")
list(APPEND paths_tutorial "${tutorial_dir}")
list(APPEND deps ${tutorial_dir})
# tutorial reference entry
file(GLOB tutorials RELATIVE "${OPENCV_MODULE_opencv_${m}_LOCATION}" "${tutorial_dir}/*.markdown")
foreach (t ${tutorials})
# Lazily create the generated "contrib tutorials" landing page the first
# time any module contributes a tutorial.
if (NOT DEFINED CMAKE_DOXYGEN_TUTORIAL_CONTRIB_ROOT)
set(CMAKE_DOXYGEN_TUTORIAL_CONTRIB_ROOT "- @ref tutorial_contrib_root")
set(tutorial_contrib_root "${CMAKE_CURRENT_BINARY_DIR}/contrib_tutorials.markdown")
file(WRITE "${tutorial_contrib_root}"
"Tutorials for contrib modules {#tutorial_contrib_root}\n"
"=============================\n")
endif()
# Extract the doxygen page id ({#id}) from the tutorial's first matching line.
file(STRINGS "${OPENCV_MODULE_opencv_${m}_LOCATION}/${t}" tutorial_id LIMIT_COUNT 1 REGEX ".*{#[^}]+}")
string(REGEX REPLACE ".*{#([^}]+)}" "\\1" tutorial_id "${tutorial_id}")
file(APPEND "${tutorial_contrib_root}" "- ${m}. @subpage ${tutorial_id}\n")
endforeach()
endif()
# HAL replacement file
set(replacement_header "${OPENCV_MODULE_opencv_${m}_LOCATION}/src/hal_replacement.hpp")
if(EXISTS "${replacement_header}")
list(APPEND paths_hal_interface "${replacement_header}")
endif()
# BiBTeX file (note: paths_bib is a space-separated string, not a CMake list,
# because it is substituted verbatim into the Doxyfile)
set(bib_file "${docs_dir}/${m}.bib")
if(EXISTS "${bib_file}")
set(paths_bib "${paths_bib} ${bib_file}")
list(APPEND deps ${bib_file})
endif()
# Reference entry: main vs. extra module lists, distinguished by membership
# in OPENCV_MODULES_EXTRA (reuses _pos).
set(one_ref "\t- ${m}. @ref ${m}\n")
list(FIND OPENCV_MODULES_EXTRA ${m} _pos)
if(${_pos} EQUAL -1)
set(refs_main "${refs_main}${one_ref}")
else()
set(refs_extra "${refs_extra}${one_ref}")
endif()
endif()
endforeach()
# fix references
# set(ref_header "Module name | Folder\n----------- | ------")
# if(refs_main)
# set(refs_main "### Main modules\n${ref_header}\n${refs_main}")
# endif()
# if(refs_extra)
# set(refs_extra "### Extra modules\n${ref_header}\n${refs_extra}")
# endif()
if(refs_main)
set(refs_main "- Main modules:\n${refs_main}")
endif()
if(refs_extra)
set(refs_extra "- Extra modules:\n${refs_extra}")
endif()
# additional config
set(doxyfile "${CMAKE_CURRENT_BINARY_DIR}/Doxyfile")
set(rootfile "${CMAKE_CURRENT_BINARY_DIR}/root.markdown")
set(bibfile "${CMAKE_CURRENT_SOURCE_DIR}/opencv.bib")
set(faqfile "${CMAKE_CURRENT_SOURCE_DIR}/faq.markdown")
set(tutorial_path "${CMAKE_CURRENT_SOURCE_DIR}/tutorials")
set(tutorial_py_path "${CMAKE_CURRENT_SOURCE_DIR}/py_tutorials")
set(CMAKE_DOXYGEN_TUTORIAL_JS_ROOT "- @ref tutorial_js_root")
set(tutorial_js_path "${CMAKE_CURRENT_SOURCE_DIR}/js_tutorials")
set(example_path "${CMAKE_SOURCE_DIR}/samples")
set(doxygen_image_path
${CMAKE_CURRENT_SOURCE_DIR}/images
${paths_doc}
${tutorial_path}
${tutorial_py_path}
${tutorial_js_path}
${paths_tutorial}
#${OpenCV_SOURCE_DIR}/samples/data # TODO: need to resolve ambiguous conflicts first
${OpenCV_SOURCE_DIR}
${OpenCV_SOURCE_DIR}/modules # <opencv>/modules
${OPENCV_EXTRA_MODULES_PATH} # <opencv_contrib>/modules
${OPENCV_DOCS_EXTRA_IMAGE_PATH} # custom variable for user modules
)
# set export variables
# CMake lists (";"-separated) are converted to doxygen's " \\\n"-continued
# multi-line values before substitution into Doxyfile.in.
string(REPLACE ";" " \\\n" CMAKE_DOXYGEN_INPUT_LIST "${rootfile} ; ${faqfile} ; ${paths_include} ; ${paths_hal_interface} ; ${paths_doc} ; ${tutorial_path} ; ${tutorial_py_path} ; ${tutorial_js_path} ; ${paths_tutorial} ; ${tutorial_contrib_root}")
string(REPLACE ";" " \\\n" CMAKE_DOXYGEN_IMAGE_PATH "${doxygen_image_path}")
string(REPLACE ";" " \\\n" CMAKE_DOXYGEN_EXCLUDE_LIST "${CMAKE_DOXYGEN_EXCLUDE_LIST}")
string(REPLACE ";" " " CMAKE_DOXYGEN_ENABLED_SECTIONS "${CMAKE_DOXYGEN_ENABLED_SECTIONS}")
# TODO: remove paths_doc from EXAMPLE_PATH after face module tutorials/samples moved to separate folders
string(REPLACE ";" " \\\n" CMAKE_DOXYGEN_EXAMPLE_PATH "${example_path} ; ${paths_doc} ; ${paths_sample}")
string(REPLACE ";" " \\\n" CMAKE_DOXYGEN_INCLUDE_ROOTS "${paths_include}")
set(CMAKE_DOXYGEN_LAYOUT "${CMAKE_CURRENT_BINARY_DIR}/DoxygenLayout.xml")
set(CMAKE_DOXYGEN_OUTPUT_PATH "doxygen")
set(CMAKE_DOXYGEN_MAIN_REFERENCE "${refs_main}")
set(CMAKE_DOXYGEN_EXTRA_REFERENCE "${refs_extra}")
set(CMAKE_EXTRA_BIB_FILES "${bibfile} ${paths_bib}")
# Normalize the QHP switch to the literal YES/NO that doxygen expects.
if (CMAKE_DOXYGEN_GENERATE_QHP)
set(CMAKE_DOXYGEN_GENERATE_QHP "YES")
else()
set(CMAKE_DOXYGEN_GENERATE_QHP "NO")
endif()
list(APPEND CMAKE_DOXYGEN_HTML_FILES "${CMAKE_CURRENT_SOURCE_DIR}/opencv.ico")
list(APPEND CMAKE_DOXYGEN_HTML_FILES "${CMAKE_CURRENT_SOURCE_DIR}/pattern.png")
list(APPEND CMAKE_DOXYGEN_HTML_FILES "${CMAKE_CURRENT_SOURCE_DIR}/acircles_pattern.png")
list(APPEND CMAKE_DOXYGEN_HTML_FILES "${CMAKE_CURRENT_SOURCE_DIR}/bodybg.png")
# list(APPEND CMAKE_DOXYGEN_HTML_FILES "${CMAKE_CURRENT_SOURCE_DIR}/mymath.sty")
list(APPEND CMAKE_DOXYGEN_HTML_FILES "${CMAKE_CURRENT_SOURCE_DIR}/tutorial-utils.js")
string(REPLACE ";" " \\\n" CMAKE_DOXYGEN_HTML_FILES "${CMAKE_DOXYGEN_HTML_FILES}")
# Graphviz 'dot' support: seed cache defaults from what find_package(Doxygen)
# discovered, but let the user override via the OPENCV_DOCS_* cache entries.
if (DOXYGEN_DOT_EXECUTABLE)
message(STATUS "Found DOT executable: ${DOXYGEN_DOT_EXECUTABLE}")
set(init_dot_path "${DOXYGEN_DOT_EXECUTABLE}")
set(init_dot_mode "YES")
else()
set(init_dot_path "")
set(init_dot_mode "NO")
endif()
set(OPENCV_DOCS_DOT_PATH "${init_dot_path}" CACHE PATH "Doxygen/DOT_PATH value")
set(OPENCV_DOCS_HAVE_DOT "${init_dot_mode}" CACHE BOOL "Doxygen: build extra diagrams")
set(CMAKECONFIG_DOT_PATH "${OPENCV_DOCS_DOT_PATH}")
set(CMAKECONFIG_HAVE_DOT "${OPENCV_DOCS_HAVE_DOT}")
# 'png' is good enough for compatibility (but requires +50% storage space)
set(OPENCV_DOCS_DOT_IMAGE_FORMAT "svg" CACHE STRING "Doxygen/DOT_IMAGE_FORMAT value")
set(CMAKECONFIG_DOT_IMAGE_FORMAT "${OPENCV_DOCS_DOT_IMAGE_FORMAT}")
# Doxygen 1.8.16 fix: https://github.com/doxygen/doxygen/pull/6870
# NO is needed here: https://github.com/opencv/opencv/pull/16039
set(OPENCV_DOCS_INTERACTIVE_SVG "NO" CACHE BOOL "Doxygen/INTERACTIVE_SVG value")
set(CMAKECONFIG_INTERACTIVE_SVG "${OPENCV_DOCS_INTERACTIVE_SVG}")
set(OPENCV_DOCS_DOXYFILE_IN "Doxyfile.in" CACHE PATH "Doxygen configuration template file (Doxyfile.in)")
set(OPENCV_DOCS_DOXYGEN_LAYOUT "DoxygenLayout.xml" CACHE PATH "Doxygen layout file (.xml)")
# writing file
configure_file("${OPENCV_DOCS_DOXYGEN_LAYOUT}" DoxygenLayout.xml @ONLY)
configure_file("${OPENCV_DOCS_DOXYFILE_IN}" ${doxyfile} @ONLY)
configure_file(root.markdown.in ${rootfile} @ONLY)
# js tutorial assets
set(opencv_tutorial_html_dir "${CMAKE_CURRENT_BINARY_DIR}/doxygen/html")
set(js_tutorials_assets_dir "${CMAKE_CURRENT_SOURCE_DIR}/js_tutorials/js_assets")
set(js_tutorials_assets_deps "")
# make sure the build directory exists
file(MAKE_DIRECTORY "${opencv_tutorial_html_dir}")
# gather and copy specific files for js tutorials
file(GLOB_RECURSE js_assets "${js_tutorials_assets_dir}/*")
ocv_list_filterout(js_assets "\\\\.eslintrc.json")
list(APPEND js_assets "${OpenCV_SOURCE_DIR}/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/Data/box.mp4")
# opencv.js comes either from this build (BUILD_opencv_js) or from a
# user-supplied prebuilt location (OPENCV_JS_LOCATION).
if(BUILD_opencv_js)
set(ocv_js_dir "${CMAKE_BINARY_DIR}/bin")
set(ocv_js "opencv.js")
list(APPEND js_assets "${ocv_js_dir}/${ocv_js}")
elseif(DEFINED OPENCV_JS_LOCATION)
list(APPEND js_assets "${OPENCV_JS_LOCATION}")
endif()
# copy haar cascade files
set(haar_cascade_files "")
set(data_harrcascades_path "${OpenCV_SOURCE_DIR}/data/haarcascades/")
list(APPEND js_tutorials_assets_deps "${data_harrcascades_path}/haarcascade_frontalface_default.xml" "${data_harrcascades_path}/haarcascade_eye.xml")
list(APPEND js_assets "${data_harrcascades_path}/haarcascade_frontalface_default.xml" "${data_harrcascades_path}/haarcascade_eye.xml")
# Copy each asset into the generated HTML directory only when it changed;
# both the source and the copy are recorded as doxygen_cpp dependencies.
foreach(f ${js_assets})
get_filename_component(fname "${f}" NAME)
add_custom_command(OUTPUT "${opencv_tutorial_html_dir}/${fname}"
COMMAND ${CMAKE_COMMAND} -E copy_if_different "${f}" "${opencv_tutorial_html_dir}/${fname}"
DEPENDS "${f}"
COMMENT "Copying ${fname}"
)
list(APPEND js_tutorials_assets_deps "${f}" "${opencv_tutorial_html_dir}/${fname}")
endforeach()
# Main documentation target: runs doxygen on the configured Doxyfile.
add_custom_target(
doxygen_cpp
COMMAND ${DOXYGEN_EXECUTABLE} ${doxyfile}
DEPENDS ${doxyfile} ${rootfile} ${bibfile} ${deps} ${js_tutorials_assets_deps}
COMMENT "Generate Doxygen documentation"
)
# Probe once for the bs4 package and cache the result (HAVE_PYTHON_BS4).
if(NOT DEFINED HAVE_PYTHON_BS4 AND PYTHON_DEFAULT_EXECUTABLE)
# Documentation post-processing tool requires BeautifulSoup Python package
execute_process(COMMAND "${PYTHON_DEFAULT_EXECUTABLE}" -c "import bs4; from bs4 import BeautifulSoup; print(bs4.__version__)"
RESULT_VARIABLE _result
OUTPUT_VARIABLE _bs4_version
OUTPUT_STRIP_TRAILING_WHITESPACE)
if(NOT _result EQUAL 0)
set(HAVE_PYTHON_BS4 0 CACHE INTERNAL "")
else()
message(STATUS "Python BeautifulSoup (bs4) version: ${_bs4_version}")
set(HAVE_PYTHON_BS4 1 CACHE INTERNAL "")
endif()
endif()
# Optional: inject Python signatures into the generated HTML (requires
# Python, bs4, the signatures file and the python bindings generator target).
if(PYTHON_DEFAULT_EXECUTABLE
AND HAVE_PYTHON_BS4
AND OPENCV_PYTHON_SIGNATURES_FILE
AND TARGET gen_opencv_python_source)
add_custom_target(doxygen_python
COMMAND ${PYTHON_DEFAULT_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/tools/add_signatures.py" "${CMAKE_CURRENT_BINARY_DIR}/doxygen/html/" "${OPENCV_PYTHON_SIGNATURES_FILE}" "python"
DEPENDS doxygen_cpp gen_opencv_python_source
COMMENT "Inject Python signatures into documentation"
)
endif()
# Wire up the target dependency scheme described at the top of this file.
add_dependencies(doxygen doxygen_cpp)
if(TARGET doxygen_python)
add_dependencies(doxygen doxygen_python)
endif()
if(TARGET doxygen_javadoc)
add_dependencies(doxygen_cpp doxygen_javadoc)
endif()
add_dependencies(opencv_docs doxygen)
# OPTIONAL: the docs component installs only when the html output exists.
install(DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/doxygen/html
DESTINATION "${OPENCV_DOC_INSTALL_PATH}"
COMPONENT "docs" OPTIONAL
${compatible_MESSAGE_NEVER}
)
endif()

View File

@@ -0,0 +1,351 @@
DOXYFILE_ENCODING = UTF-8
PROJECT_NAME = OpenCV
PROJECT_NUMBER = @OPENCV_VERSION@
PROJECT_BRIEF = "Open Source Computer Vision"
PROJECT_LOGO = @CMAKE_CURRENT_SOURCE_DIR@/opencv-logo-small.png
#PROJECT_ICON =
OUTPUT_DIRECTORY = @CMAKE_DOXYGEN_OUTPUT_PATH@
CREATE_SUBDIRS = YES
CREATE_SUBDIRS_LEVEL = 8
ALLOW_UNICODE_NAMES = NO
OUTPUT_LANGUAGE = English
BRIEF_MEMBER_DESC = YES
REPEAT_BRIEF = YES
ABBREVIATE_BRIEF = "The $name class" \
"The $name widget" \
"The $name file" \
is \
provides \
specifies \
contains \
represents \
a \
an \
the
ALWAYS_DETAILED_SEC = NO
INLINE_INHERITED_MEMB = NO
FULL_PATH_NAMES = YES
STRIP_FROM_PATH = @CMAKE_SOURCE_DIR@/modules @CMAKE_DOXYGEN_INCLUDE_ROOTS@
STRIP_FROM_INC_PATH = @CMAKE_DOXYGEN_INCLUDE_ROOTS@
SHORT_NAMES = NO
JAVADOC_AUTOBRIEF = NO
JAVADOC_BANNER = NO
QT_AUTOBRIEF = NO
MULTILINE_CPP_IS_BRIEF = NO
PYTHON_DOCSTRING = YES
INHERIT_DOCS = YES
SEPARATE_MEMBER_PAGES = NO
TAB_SIZE = 4
ALIASES += add_toggle{1}="@htmlonly[block] <div class='newInnerHTML'>\1</div><div> <script type='text/javascript'> addToggle(); </script>@endhtmlonly"
ALIASES += add_toggle_cpp="@htmlonly[block] <div class='newInnerHTML' title='cpp' style='display: none;'>C++</div><div class='toggleable_div label_cpp' style='display: none;'>@endhtmlonly"
ALIASES += add_toggle_java="@htmlonly[block] <div class='newInnerHTML' title='java' style='display: none;'>Java</div><div class='toggleable_div label_java' style='display: none;'>@endhtmlonly"
ALIASES += add_toggle_python="@htmlonly[block] <div class='newInnerHTML' title='python' style='display: none;'>Python</div><div class='toggleable_div label_python' style='display: none;'>@endhtmlonly"
ALIASES += end_toggle="@htmlonly[block] </div> @endhtmlonly"
ALIASES += prev_tutorial{1}="**Prev Tutorial:** \ref \1 \n"
ALIASES += next_tutorial{1}="**Next Tutorial:** \ref \1 \n"
ALIASES += youtube{1}="@htmlonly[block]<div align='center'><iframe title='Video' width='560' height='349' src='https://www.youtube.com/embed/\1?rel=0' frameborder='0' align='middle' allowfullscreen></iframe></div>@endhtmlonly"
OPTIMIZE_OUTPUT_FOR_C = NO
OPTIMIZE_OUTPUT_JAVA = NO
OPTIMIZE_FOR_FORTRAN = NO
OPTIMIZE_OUTPUT_VHDL = NO
OPTIMIZE_OUTPUT_SLICE = NO
EXTENSION_MAPPING =
MARKDOWN_SUPPORT = YES
TOC_INCLUDE_HEADINGS = 5
MARKDOWN_ID_STYLE = DOXYGEN
AUTOLINK_SUPPORT = YES
BUILTIN_STL_SUPPORT = YES
CPP_CLI_SUPPORT = NO
SIP_SUPPORT = NO
IDL_PROPERTY_SUPPORT = YES
DISTRIBUTE_GROUP_DOC = NO
GROUP_NESTED_COMPOUNDS = NO
SUBGROUPING = YES
INLINE_GROUPED_CLASSES = NO
INLINE_SIMPLE_STRUCTS = NO
TYPEDEF_HIDES_STRUCT = YES
LOOKUP_CACHE_SIZE = 0
NUM_PROC_THREADS = 1
TIMESTAMP = YES
EXTRACT_ALL = YES
EXTRACT_PRIVATE = NO
EXTRACT_PRIV_VIRTUAL = NO
EXTRACT_PACKAGE = NO
EXTRACT_STATIC = YES
EXTRACT_LOCAL_CLASSES = NO
EXTRACT_LOCAL_METHODS = NO
EXTRACT_ANON_NSPACES = NO
RESOLVE_UNNAMED_PARAMS = YES
HIDE_UNDOC_MEMBERS = NO
HIDE_UNDOC_CLASSES = NO
HIDE_FRIEND_COMPOUNDS = NO
HIDE_IN_BODY_DOCS = NO
INTERNAL_DOCS = NO
CASE_SENSE_NAMES = YES
HIDE_SCOPE_NAMES = NO
HIDE_COMPOUND_REFERENCE= NO
SHOW_HEADERFILE = YES
SHOW_INCLUDE_FILES = YES
SHOW_GROUPED_MEMB_INC = YES
FORCE_LOCAL_INCLUDES = NO
INLINE_INFO = YES
SORT_MEMBER_DOCS = YES
SORT_BRIEF_DOCS = YES
SORT_MEMBERS_CTORS_1ST = YES
SORT_GROUP_NAMES = NO
SORT_BY_SCOPE_NAME = NO
STRICT_PROTO_MATCHING = NO
GENERATE_TODOLIST = YES
GENERATE_TESTLIST = YES
GENERATE_BUGLIST = YES
GENERATE_DEPRECATEDLIST= YES
ENABLED_SECTIONS = @CMAKE_DOXYGEN_ENABLED_SECTIONS@
MAX_INITIALIZER_LINES = 30
SHOW_USED_FILES = YES
SHOW_FILES = YES
SHOW_NAMESPACES = YES
FILE_VERSION_FILTER =
LAYOUT_FILE = @CMAKE_DOXYGEN_LAYOUT@
CITE_BIB_FILES = @CMAKE_EXTRA_BIB_FILES@
QUIET = YES
WARNINGS = YES
WARN_IF_UNDOCUMENTED = YES
WARN_IF_DOC_ERROR = YES
WARN_IF_INCOMPLETE_DOC = YES
WARN_NO_PARAMDOC = NO
WARN_IF_UNDOC_ENUM_VAL = NO
WARN_AS_ERROR = NO
WARN_FORMAT = "$file:$line: $text"
WARN_LINE_FORMAT = "at line $line of file $file"
WARN_LOGFILE =
INPUT = @CMAKE_DOXYGEN_INPUT_LIST@
INPUT_ENCODING = UTF-8
INPUT_FILE_ENCODING =
FILE_PATTERNS =
RECURSIVE = YES
EXCLUDE = @CMAKE_DOXYGEN_EXCLUDE_LIST@
EXCLUDE_SYMLINKS = NO
EXCLUDE_PATTERNS = *.inl.hpp *.impl.hpp *_detail.hpp */cudev/**/detail/*.hpp *.m */opencl/runtime/* */legacy/* *_c.h @DOXYGEN_EXCLUDE_PATTERNS@
EXCLUDE_SYMBOLS = cv::DataType<*> cv::traits::* int void CV__* T __CV* cv::gapi::detail*
EXAMPLE_PATH = @CMAKE_DOXYGEN_EXAMPLE_PATH@
EXAMPLE_PATTERNS = *
EXAMPLE_RECURSIVE = YES
IMAGE_PATH = @CMAKE_DOXYGEN_IMAGE_PATH@
INPUT_FILTER =
FILTER_PATTERNS =
FILTER_SOURCE_FILES = NO
FILTER_SOURCE_PATTERNS =
USE_MDFILE_AS_MAINPAGE =
SOURCE_BROWSER = NO
INLINE_SOURCES = NO
STRIP_CODE_COMMENTS = YES
REFERENCED_BY_RELATION = NO
REFERENCES_RELATION = NO
REFERENCES_LINK_SOURCE = YES
SOURCE_TOOLTIPS = YES
USE_HTAGS = NO
VERBATIM_HEADERS = NO
CLANG_ASSISTED_PARSING = NO
CLANG_ADD_INC_PATHS = YES
CLANG_OPTIONS =
CLANG_DATABASE_PATH =
ALPHABETICAL_INDEX = YES
IGNORE_PREFIX =
GENERATE_HTML = YES
HTML_OUTPUT = html
HTML_FILE_EXTENSION = .html
HTML_HEADER = @CMAKE_CURRENT_SOURCE_DIR@/header.html
HTML_FOOTER = @CMAKE_CURRENT_SOURCE_DIR@/footer.html
HTML_STYLESHEET =
HTML_EXTRA_STYLESHEET = @CMAKE_CURRENT_SOURCE_DIR@/stylesheet.css
HTML_EXTRA_FILES = @CMAKE_DOXYGEN_HTML_FILES@
HTML_COLORSTYLE = LIGHT
HTML_COLORSTYLE_HUE = 220
HTML_COLORSTYLE_SAT = 100
HTML_COLORSTYLE_GAMMA = 80
HTML_DYNAMIC_MENUS = YES
HTML_DYNAMIC_SECTIONS = NO
HTML_CODE_FOLDING = YES
#HTML_COPY_CLIPBOARD = YES
#HTML_PROJECT_COOKIE =
HTML_INDEX_NUM_ENTRIES = 100
GENERATE_DOCSET = NO
DOCSET_FEEDNAME = "Doxygen generated docs"
DOCSET_FEEDURL =
DOCSET_BUNDLE_ID = org.doxygen.Project
DOCSET_PUBLISHER_ID = org.doxygen.Publisher
DOCSET_PUBLISHER_NAME = Publisher
GENERATE_HTMLHELP = NO
CHM_FILE =
HHC_LOCATION =
GENERATE_CHI = NO
CHM_INDEX_ENCODING =
BINARY_TOC = NO
TOC_EXPAND = NO
SITEMAP_URL =
GENERATE_QHP = @CMAKE_DOXYGEN_GENERATE_QHP@
QCH_FILE = ../opencv-@OPENCV_VERSION@.qch
QHP_NAMESPACE = org.opencv.@OPENCV_VERSION@
QHP_VIRTUAL_FOLDER = opencv
QHP_CUST_FILTER_NAME =
QHP_CUST_FILTER_ATTRS =
QHP_SECT_FILTER_ATTRS =
QHG_LOCATION =
GENERATE_ECLIPSEHELP = NO
ECLIPSE_DOC_ID = org.doxygen.Project
DISABLE_INDEX = NO
GENERATE_TREEVIEW = NO
FULL_SIDEBAR = NO
ENUM_VALUES_PER_LINE = 1
TREEVIEW_WIDTH = 250
EXT_LINKS_IN_WINDOW = YES
OBFUSCATE_EMAILS = YES
HTML_FORMULA_FORMAT = svg
FORMULA_FONTSIZE = 14
FORMULA_MACROFILE =
USE_MATHJAX = YES
MATHJAX_VERSION = MathJax_3
MATHJAX_FORMAT = chtml
MATHJAX_RELPATH = @OPENCV_MATHJAX_RELPATH@
MATHJAX_EXTENSIONS = ams
MATHJAX_CODEFILE = @CMAKE_CURRENT_SOURCE_DIR@/mymath.js
SEARCHENGINE = YES
SERVER_BASED_SEARCH = NO
EXTERNAL_SEARCH = NO
SEARCHENGINE_URL =
SEARCHDATA_FILE = searchdata.xml
EXTERNAL_SEARCH_ID =
EXTRA_SEARCH_MAPPINGS =
GENERATE_LATEX = NO
LATEX_OUTPUT = latex
LATEX_CMD_NAME = latex
MAKEINDEX_CMD_NAME = makeindex
LATEX_MAKEINDEX_CMD = makeindex
COMPACT_LATEX = NO
PAPER_TYPE = a4
EXTRA_PACKAGES = mymath
LATEX_HEADER =
LATEX_FOOTER =
LATEX_EXTRA_STYLESHEET =
LATEX_EXTRA_FILES =
PDF_HYPERLINKS = YES
USE_PDFLATEX = YES
LATEX_BATCHMODE = NO
LATEX_HIDE_INDICES = NO
LATEX_BIB_STYLE = plain
LATEX_EMOJI_DIRECTORY =
GENERATE_RTF = NO
RTF_OUTPUT = rtf
COMPACT_RTF = NO
RTF_HYPERLINKS = NO
RTF_STYLESHEET_FILE =
RTF_EXTENSIONS_FILE =
GENERATE_MAN = NO
MAN_OUTPUT = man
MAN_EXTENSION = .3
MAN_SUBDIR =
MAN_LINKS = NO
GENERATE_XML = NO
XML_OUTPUT = xml
XML_PROGRAMLISTING = YES
XML_NS_MEMB_FILE_SCOPE = NO
GENERATE_DOCBOOK = NO
DOCBOOK_OUTPUT = docbook
GENERATE_AUTOGEN_DEF = NO
GENERATE_SQLITE3 = NO
SQLITE3_OUTPUT = sqlite3
SQLITE3_RECREATE_DB = YES
GENERATE_PERLMOD = NO
PERLMOD_LATEX = NO
PERLMOD_PRETTY = YES
PERLMOD_MAKEVAR_PREFIX =
ENABLE_PREPROCESSING = YES
MACRO_EXPANSION = YES
EXPAND_ONLY_PREDEF = NO
SEARCH_INCLUDES = YES
INCLUDE_PATH =
INCLUDE_FILE_PATTERNS =
PREDEFINED = __cplusplus=1 \
CVAPI(x)=x \
CV_API_CALL= \
CV_DOXYGEN= \
CV_EXPORTS= \
CV_EXPORTS_W= \
CV_EXPORTS_W_SIMPLE= \
CV_EXPORTS_AS(x)= \
CV_EXPORTS_W_MAP= \
GAPI_EXPORTS= \
GAPI_EXPORTS_W= \
CV_IN_OUT= \
CV_OUT= \
CV_PROP= \
CV_PROP_RW= \
CV_WRAP= \
CV_WRAP_AS(x)= \
CV_WRAP_MAPPABLE(x)= \
CV_WRAP_PHANTOM(x)= \
CV_WRAP_DEFAULT(x)= \
CV_CDECL= \
CV_Func= \
CV_DO_PRAGMA(x)= \
CV_SUPPRESS_DEPRECATED_START= \
CV_SUPPRESS_DEPRECATED_END= \
CV_INLINE= \
CV_NORETURN= \
CV_DEFAULT(x)=" = x" \
CV_NEON=1 \
CV_SSE2=1 \
CV_SIMD128=1 \
CV_SIMD256=1 \
CV_SIMD512=1 \
CV_SIMD128_64F=1 \
CV_SIMD256_64F=1 \
CV_SIMD512_64F=1 \
CV__DEBUG_NS_BEGIN= \
CV__DEBUG_NS_END= \
CV_DEPRECATED_EXTERNAL= \
CV_DEPRECATED=
EXPAND_AS_DEFINED =
SKIP_FUNCTION_MACROS = YES
TAGFILES =
GENERATE_TAGFILE = @CMAKE_DOXYGEN_OUTPUT_PATH@/html/opencv.tag
ALLEXTERNALS = NO
EXTERNAL_GROUPS = YES
EXTERNAL_PAGES = YES
HIDE_UNDOC_RELATIONS = NO
HAVE_DOT = @CMAKECONFIG_HAVE_DOT@
DOT_NUM_THREADS = 0
DOT_FONTPATH =
DOT_COMMON_ATTR = "fontname=Helvetica,fontsize=10"
DOT_EDGE_ATTR = "labelfontname=Helvetica,labelfontsize=10"
DOT_NODE_ATTR = "shape=box,height=0.2,width=0.4"
CLASS_GRAPH = NO
COLLABORATION_GRAPH = YES
GROUP_GRAPHS = NO
UML_LOOK = YES
UML_LIMIT_NUM_FIELDS = 10
DOT_UML_DETAILS = NO
DOT_WRAP_THRESHOLD = 17
TEMPLATE_RELATIONS = YES
INCLUDE_GRAPH = YES
INCLUDED_BY_GRAPH = YES
CALL_GRAPH = YES
CALLER_GRAPH = NO
GRAPHICAL_HIERARCHY = YES
DIRECTORY_GRAPH = YES
DIR_GRAPH_MAX_DEPTH = 1
DOT_IMAGE_FORMAT = @CMAKECONFIG_DOT_IMAGE_FORMAT@
INTERACTIVE_SVG = @CMAKECONFIG_INTERACTIVE_SVG@
DOT_PATH = @CMAKECONFIG_DOT_PATH@
DOTFILE_DIRS =
DIAFILE_DIRS =
PLANTUML_JAR_PATH =
PLANTUML_CFG_FILE =
PLANTUML_INCLUDE_PATH =
DOT_GRAPH_MAX_NODES = 250
MAX_DOT_GRAPH_DEPTH = 0
DOT_MULTI_TARGETS = NO
GENERATE_LEGEND = YES
DOT_CLEANUP = YES
MSCGEN_TOOL =
MSCFILE_DIRS =

View File

@@ -0,0 +1,194 @@
<doxygenlayout version="1.0">
<!-- Generated by doxygen 1.8.6 -->
<!-- Navigation index tabs for HTML output -->
<navindex>
<tab type="mainpage" visible="yes" title=""/>
<tab type="pages" visible="yes" title="" intro=""/>
<tab type="modules" visible="yes" title="" intro=""/>
<tab type="namespaces" visible="yes" title="">
<tab type="namespacelist" visible="yes" title="" intro=""/>
</tab>
<tab type="classes" visible="yes" title="">
<tab type="classlist" visible="yes" title="" intro=""/>
<tab type="classindex" visible="$ALPHABETICAL_INDEX" title=""/>
</tab>
<tab type="files" visible="yes" title="">
<tab type="filelist" visible="yes" title="Files index" intro=""/>
<tab type="globals" visible="yes" title="Global objects" intro=""/>
</tab>
<tab type="examples" visible="yes" title="" intro=""/>
@CMAKE_DOXYGEN_JAVADOC_NODE@
</navindex>
<!-- Layout definition for a class page -->
<class>
<briefdescription visible="yes"/>
<includes visible="$SHOW_INCLUDE_FILES"/>
<inheritancegraph visible="$CLASS_GRAPH"/>
<collaborationgraph visible="$COLLABORATION_GRAPH"/>
<memberdecl>
<nestedclasses visible="yes" title=""/>
<publictypes title=""/>
<services title=""/>
<interfaces title=""/>
<publicslots title=""/>
<signals title=""/>
<publicmethods title=""/>
<publicstaticmethods title=""/>
<publicattributes title=""/>
<publicstaticattributes title=""/>
<protectedtypes title=""/>
<protectedslots title=""/>
<protectedmethods title=""/>
<protectedstaticmethods title=""/>
<protectedattributes title=""/>
<protectedstaticattributes title=""/>
<packagetypes title=""/>
<packagemethods title=""/>
<packagestaticmethods title=""/>
<packageattributes title=""/>
<packagestaticattributes title=""/>
<properties title=""/>
<events title=""/>
<privatetypes title=""/>
<privateslots title=""/>
<privatemethods title=""/>
<privatestaticmethods title=""/>
<privateattributes title=""/>
<privatestaticattributes title=""/>
<friends title=""/>
<related title="" subtitle=""/>
<membergroups visible="yes"/>
</memberdecl>
<detaileddescription title=""/>
<memberdef>
<inlineclasses title=""/>
<typedefs title=""/>
<enums title=""/>
<services title=""/>
<interfaces title=""/>
<constructors title=""/>
<functions title=""/>
<related title=""/>
<variables title=""/>
<properties title=""/>
<events title=""/>
</memberdef>
<allmemberslink visible="yes"/>
<usedfiles visible="$SHOW_USED_FILES"/>
<authorsection visible="yes"/>
</class>
<!-- Layout definition for a namespace page -->
<namespace>
<briefdescription visible="yes"/>
<memberdecl>
<nestednamespaces visible="yes" title=""/>
<constantgroups visible="yes" title=""/>
<classes visible="yes" title=""/>
<typedefs title=""/>
<enums title=""/>
<functions title=""/>
<variables title=""/>
<membergroups visible="yes"/>
</memberdecl>
<detaileddescription title=""/>
<memberdef>
<inlineclasses title=""/>
<typedefs title=""/>
<enums title=""/>
<functions title=""/>
<variables title=""/>
</memberdef>
<authorsection visible="yes"/>
</namespace>
<!-- Layout definition for a file page -->
<file>
<briefdescription visible="yes"/>
<includes visible="$SHOW_INCLUDE_FILES"/>
<includegraph visible="$INCLUDE_GRAPH"/>
<includedbygraph visible="$INCLUDED_BY_GRAPH"/>
<sourcelink visible="yes"/>
<memberdecl>
<classes visible="yes" title=""/>
<namespaces visible="yes" title=""/>
<constantgroups visible="yes" title=""/>
<defines title=""/>
<typedefs title=""/>
<enums title=""/>
<functions title=""/>
<variables title=""/>
<membergroups visible="yes"/>
</memberdecl>
<detaileddescription title=""/>
<memberdef>
<inlineclasses title=""/>
<defines title=""/>
<typedefs title=""/>
<enums title=""/>
<functions title=""/>
<variables title=""/>
</memberdef>
<authorsection/>
</file>
<!-- Layout definition for a group page -->
<group>
<briefdescription visible="yes"/>
<groupgraph visible="$GROUP_GRAPHS"/>
<memberdecl>
<nestedgroups visible="yes" title=""/>
</memberdecl>
<detaileddescription title=""/>
<memberdecl>
<dirs visible="yes" title=""/>
<files visible="yes" title=""/>
<namespaces visible="yes" title=""/>
<classes visible="yes" title=""/>
<defines title=""/>
<typedefs title=""/>
<enums title=""/>
<enumvalues title=""/>
<functions title=""/>
<variables title=""/>
<signals title=""/>
<publicslots title=""/>
<protectedslots title=""/>
<privateslots title=""/>
<events title=""/>
<properties title=""/>
<friends title=""/>
<membergroups visible="yes"/>
</memberdecl>
<memberdef>
<pagedocs/>
<inlineclasses title=""/>
<defines title=""/>
<typedefs title=""/>
<enums title=""/>
<enumvalues title=""/>
<functions title=""/>
<variables title=""/>
<signals title=""/>
<publicslots title=""/>
<protectedslots title=""/>
<privateslots title=""/>
<events title=""/>
<properties title=""/>
<friends title=""/>
</memberdef>
<authorsection visible="yes"/>
</group>
<!-- Layout definition for a directory page -->
<directory>
<briefdescription visible="yes"/>
<directorygraph visible="yes"/>
<memberdecl>
<dirs visible="yes"/>
<files visible="yes"/>
</memberdecl>
<detaileddescription title=""/>
</directory>
</doxygenlayout>

View File

@@ -0,0 +1,42 @@
By downloading, copying, installing or using the software you agree to this license.
If you do not agree to this license, do not download, install,
copy or use the software.
License Agreement
For Open Source Computer Vision Library
(3-clause BSD License)
Copyright (C) 2000-2020, Intel Corporation, all rights reserved.
Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
Copyright (C) 2009-2016, NVIDIA Corporation, all rights reserved.
Copyright (C) 2010-2013, Advanced Micro Devices, Inc., all rights reserved.
Copyright (C) 2015-2016, OpenCV Foundation, all rights reserved.
Copyright (C) 2015-2016, Itseez Inc., all rights reserved.
Copyright (C) 2019-2020, Xperience AI, all rights reserved.
Third party copyrights are property of their respective owners.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the names of the copyright holders nor the names of the contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
This software is provided by the copyright holders and contributors "as is" and
any express or implied warranties, including, but not limited to, the implied
warranties of merchantability and fitness for a particular purpose are disclaimed.
In no event shall copyright holders or contributors be liable for any direct,
indirect, incidental, special, exemplary, or consequential damages
(including, but not limited to, procurement of substitute goods or services;
loss of use, data, or profits; or business interruption) however caused
and on any theory of liability, whether in contract, strict liability,
or tort (including negligence or otherwise) arising in any way out of
the use of this software, even if advised of the possibility of such damage.

View File

@@ -0,0 +1,47 @@
Starting from OpenCV 4.5-pre (2020 August) OpenCV has changed the license from BSD to Apache 2. See https://opencv.org/blog/opencv-is-to-change-the-license-to-apache-2/ and https://github.com/opencv/opencv/wiki/OE-32.--Change-OpenCV-License-to-Apache-2 for details.
Here is the original OpenCV license:
------------------------------------------------------------------------------------
By downloading, copying, installing or using the software you agree to this license.
If you do not agree to this license, do not download, install,
copy or use the software.
License Agreement
For Open Source Computer Vision Library
(3-clause BSD License)
Copyright (C) 2000-2020, Intel Corporation, all rights reserved.
Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
Copyright (C) 2009-2016, NVIDIA Corporation, all rights reserved.
Copyright (C) 2010-2013, Advanced Micro Devices, Inc., all rights reserved.
Copyright (C) 2015-2016, OpenCV Foundation, all rights reserved.
Copyright (C) 2015-2016, Itseez Inc., all rights reserved.
Copyright (C) 2019-2020, Xperience AI, all rights reserved.
Third party copyrights are property of their respective owners.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the names of the copyright holders nor the names of the contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
This software is provided by the copyright holders and contributors "as is" and
any express or implied warranties, including, but not limited to, the implied
warranties of merchantability and fitness for a particular purpose are disclaimed.
In no event shall copyright holders or contributors be liable for any direct,
indirect, incidental, special, exemplary, or consequential damages
(including, but not limited to, procurement of substitute goods or services;
loss of use, data, or profits; or business interruption) however caused
and on any theory of liability, whether in contract, strict liability,
or tort (including negligence or otherwise) arising in any way out of
the use of this software, even if advised of the possibility of such damage.
------------------------------------------------------------------------------------

Binary file not shown.

After

Width:  |  Height:  |  Size: 111 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 513 B

Binary file not shown.

After

Width:  |  Height:  |  Size: 59 KiB

View File

@@ -0,0 +1,2 @@
# doxygen citelist build workaround
citelist : .*Unexpected new line character.*

View File

@@ -0,0 +1,4 @@
Frequently Asked Questions {#faq}
==========================
Compatibility page. FAQ migrated to the project [wiki](https://github.com/opencv/opencv/wiki/FAQ).

View File

@@ -0,0 +1,26 @@
<!-- HTML footer for doxygen 1.8.6-->
<!-- start footer part -->
<!--BEGIN GENERATE_TREEVIEW-->
<div id="nav-path" class="navpath"><!-- id is needed for treeview function! -->
<ul>
$navpath
<li class="footer">$generatedby
<a href="http://www.doxygen.org/index.html">
<img class="footer" src="$relpath^doxygen.png" alt="doxygen"/></a> $doxygenversion </li>
</ul>
</div>
<!--END GENERATE_TREEVIEW-->
<!--BEGIN !GENERATE_TREEVIEW-->
<hr class="footer"/><address class="footer"><small>
$generatedby &#160;<a href="http://www.doxygen.org/index.html">
<img class="footer" src="$relpath^doxygen.png" alt="doxygen"/>
</a> $doxygenversion
</small></address>
<!--END !GENERATE_TREEVIEW-->
<script type="text/javascript">
//<![CDATA[
addTutorialsButtons();
//]]>
</script>
</body>
</html>

View File

@@ -0,0 +1,58 @@
<!-- HTML header for doxygen 1.8.6-->
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
<meta http-equiv="X-UA-Compatible" content="IE=9"/>
<meta name="generator" content="Doxygen $doxygenversion"/>
<!--BEGIN PROJECT_NAME--><title>$projectname: $title</title><!--END PROJECT_NAME-->
<!--BEGIN !PROJECT_NAME--><title>$title</title><!--END !PROJECT_NAME-->
<link href="$relpath^opencv.ico" rel="shortcut icon" type="image/x-icon" />
<link href="$relpath^tabs.css" rel="stylesheet" type="text/css"/>
<script type="text/javascript" src="$relpath^jquery.js"></script>
<script type="text/javascript" src="$relpath^dynsections.js"></script>
<script type="text/javascript" src="$relpath^tutorial-utils.js"></script>
$treeview
$search
$mathjax
<link href="$relpath^$stylesheet" rel="stylesheet" type="text/css" />
$extrastylesheet
</head>
<body>
<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
<!--BEGIN TITLEAREA-->
<div id="titlearea">
<!--#include virtual="/google-search.html"-->
<table cellspacing="0" cellpadding="0">
<tbody>
<tr style="height: 56px;">
<!--BEGIN PROJECT_LOGO-->
<td id="projectlogo"><img alt="Logo" src="$relpath^$projectlogo"/></td>
<!--END PROJECT_LOGO-->
<!--BEGIN PROJECT_NAME-->
<td style="padding-left: 0.5em;">
<div id="projectname">$projectname
<!--BEGIN PROJECT_NUMBER-->&#160;<span id="projectnumber">$projectnumber</span><!--END PROJECT_NUMBER-->
</div>
<!--BEGIN PROJECT_BRIEF--><div id="projectbrief">$projectbrief</div><!--END PROJECT_BRIEF-->
</td>
<!--END PROJECT_NAME-->
<!--BEGIN !PROJECT_NAME-->
<!--BEGIN PROJECT_BRIEF-->
<td style="padding-left: 0.5em;">
<div id="projectbrief">$projectbrief</div>
</td>
<!--END PROJECT_BRIEF-->
<!--END !PROJECT_NAME-->
<!--BEGIN DISABLE_INDEX-->
<!--BEGIN SEARCHENGINE-->
<td>$searchbox</td>
<!--END SEARCHENGINE-->
<!--END DISABLE_INDEX-->
</tr>
</tbody>
</table>
</div>
<!--END TITLEAREA-->
<!-- end header part -->

Binary file not shown.

After

Width:  |  Height:  |  Size: 247 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 34 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 17 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 176 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 27 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 6.0 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 24 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 22 KiB

View File

@@ -0,0 +1,22 @@
{
"extends": "google",
"parserOptions": {
"ecmaVersion": 6
},
"rules": {
"max-len": ["error", 100, {"ignoreUrls": true}],
"quotes": ["error", "single"],
"indent": ["error", 4, {"ArrayExpression": "first",
"ObjectExpression": "first",
"CallExpression": {"arguments": "first"},
"SwitchCase": 1}],
"require-jsdoc": "off",
"new-cap": "off"
},
"plugins": ["html"],
"settings": {
"html/javascript-mime-types": ["text/javascript", "text/code-snippet"],
"html/indent": "0",
"html/report-bad-indent": "error"
}
}

Binary file not shown.

After

Width:  |  Height:  |  Size: 40 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 16 KiB

Binary file not shown.

Binary file not shown.

After

Width:  |  Height:  |  Size: 29 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 28 KiB

View File

@@ -0,0 +1,69 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Image Padding Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Image Padding Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let dst = new cv.Mat();
// You can try more different parameters
let s = new cv.Scalar(255, 0, 0, 255);
cv.copyMakeBorder(src, dst, 10, 10, 10, 10, cv.BORDER_CONSTANT, s);
cv.imshow('canvasOutput', dst);
src.delete();
dst.delete();
</script>
<script>
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('lena.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@@ -0,0 +1,69 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Image ROI Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Image ROI Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let dst = new cv.Mat();
// You can try more different parameters
let rect = new cv.Rect(100, 100, 200, 200);
dst = src.roi(rect);
cv.imshow('canvasOutput', dst);
src.delete();
dst.delete();
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('lena.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@@ -0,0 +1,126 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Background Subtraction Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Background Subtraction Example</h2>
<p>
Click <b>Start/Stop</b> button to start or stop the camera capture.<br>
The <b>videoInput</b> is a &lt;video&gt; element used as input.
The <b>canvasOutput</b> is a &lt;canvas&gt; element used as output.<br>
The code of &lt;textarea&gt; will be executed when video is started.
You can modify the code to investigate more.
</p>
<div>
<div class="control"><button id="startAndStop" disabled>Start</button></div>
<textarea class="code" rows="29" cols="80" id="codeEditor" spellcheck="false">
</textarea>
</div>
<p class="err" id="errorMessage"></p>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<video id="videoInput" width="320" height="240" muted loop></video>
</td>
<td>
<canvas id="canvasOutput" width="320" height="240"></canvas>
</td>
<td></td>
<td></td>
</tr>
<tr>
<td>
<div class="caption">videoInput</div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
<td></td>
<td></td>
</tr>
</table>
</div>
<script src="https://webrtc.github.io/adapter/adapter-5.0.4.js" type="text/javascript"></script>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let video = document.getElementById('videoInput');
let cap = new cv.VideoCapture(video);
let frame = new cv.Mat(video.height, video.width, cv.CV_8UC4);
let fgmask = new cv.Mat(video.height, video.width, cv.CV_8UC1);
let fgbg = new cv.BackgroundSubtractorMOG2(500, 16, true);
const FPS = 30;
function processVideo() {
try {
if (!streaming) {
// clean and stop.
frame.delete(); fgmask.delete(); fgbg.delete();
return;
}
let begin = Date.now();
// start processing.
cap.read(frame);
fgbg.apply(frame, fgmask);
cv.imshow('canvasOutput', fgmask);
// schedule the next one.
let delay = 1000/FPS - (Date.now() - begin);
setTimeout(processVideo, delay);
} catch (err) {
utils.printError(err);
}
};
// schedule the first one.
setTimeout(processVideo, 0);
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
let streaming = false;
let videoInput = document.getElementById('videoInput');
let startAndStop = document.getElementById('startAndStop');
let canvasOutput = document.getElementById('canvasOutput');
let canvasContext = canvasOutput.getContext('2d');
startAndStop.addEventListener('click', () => {
if (!streaming) {
utils.clearError();
videoInput.play().then(() => {
onVideoStarted();
});
} else {
videoInput.pause();
videoInput.currentTime = 0;
onVideoStopped();
}
});
function onVideoStarted() {
streaming = true;
startAndStop.innerText = 'Stop';
videoInput.height = videoInput.width * (videoInput.videoHeight / videoInput.videoWidth);
utils.executeCode('codeEditor');
}
function onVideoStopped() {
streaming = false;
canvasContext.clearRect(0, 0, canvasOutput.width, canvasOutput.height);
startAndStop.innerText = 'Start';
}
utils.loadOpenCv(() => {
videoInput.addEventListener('canplay', () => {
startAndStop.removeAttribute('disabled');
});
videoInput.src = 'box.mp4';
});
</script>
</body>
</html>

View File

@@ -0,0 +1,172 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>CamShift Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>CamShift Example</h2>
<p>
Click <b>Start/Stop</b> button to start or stop the video.<br>
The <b>videoInput</b> is a &lt;video&gt; element used as CamShift input.
The <b>canvasOutput</b> is a &lt;canvas&gt; element used as CamShift output.<br>
The code of &lt;textarea&gt; will be executed when video is started.
You can modify the code to investigate more.
</p>
<div>
<div class="control"><button id="startAndStop" disabled>Start</button></div>
<textarea class="code" rows="29" cols="100" id="codeEditor" spellcheck="false">
</textarea>
</div>
<p class="err" id="errorMessage"></p>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<video id="videoInput" width="320" height="240" muted loop></video>
</td>
<td>
<canvas id="canvasOutput" width="320" height="240"></canvas>
</td>
<td></td>
<td></td>
</tr>
<tr>
<td>
<div class="caption">videoInput</div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
<td></td>
<td></td>
</tr>
</table>
</div>
<script src="https://webrtc.github.io/adapter/adapter-5.0.4.js" type="text/javascript"></script>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let video = document.getElementById('videoInput');
let cap = new cv.VideoCapture(video);
// take first frame of the video
let frame = new cv.Mat(video.height, video.width, cv.CV_8UC4);
cap.read(frame);
// hardcode the initial location of window
let trackWindow = new cv.Rect(150, 60, 63, 125);
// set up the ROI for tracking
let roi = frame.roi(trackWindow);
let hsvRoi = new cv.Mat();
cv.cvtColor(roi, hsvRoi, cv.COLOR_RGBA2RGB);
cv.cvtColor(hsvRoi, hsvRoi, cv.COLOR_RGB2HSV);
let mask = new cv.Mat();
let lowScalar = new cv.Scalar(30, 30, 0);
let highScalar = new cv.Scalar(180, 180, 180);
let low = new cv.Mat(hsvRoi.rows, hsvRoi.cols, hsvRoi.type(), lowScalar);
let high = new cv.Mat(hsvRoi.rows, hsvRoi.cols, hsvRoi.type(), highScalar);
cv.inRange(hsvRoi, low, high, mask);
let roiHist = new cv.Mat();
let hsvRoiVec = new cv.MatVector();
hsvRoiVec.push_back(hsvRoi);
cv.calcHist(hsvRoiVec, [0], mask, roiHist, [180], [0, 180]);
cv.normalize(roiHist, roiHist, 0, 255, cv.NORM_MINMAX);
// delete useless mats.
roi.delete(); hsvRoi.delete(); mask.delete(); low.delete(); high.delete(); hsvRoiVec.delete();
// Setup the termination criteria, either 10 iteration or move by at least 1 pt
let termCrit = new cv.TermCriteria(cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 1);
let hsv = new cv.Mat(video.height, video.width, cv.CV_8UC3);
let hsvVec = new cv.MatVector();
hsvVec.push_back(hsv);
let dst = new cv.Mat();
let trackBox = null;
const FPS = 30;
function processVideo() {
try {
if (!streaming) {
// clean and stop.
frame.delete(); dst.delete(); hsvVec.delete(); roiHist.delete(); hsv.delete();
return;
}
let begin = Date.now();
// start processing.
cap.read(frame);
cv.cvtColor(frame, hsv, cv.COLOR_RGBA2RGB);
cv.cvtColor(hsv, hsv, cv.COLOR_RGB2HSV);
cv.calcBackProject(hsvVec, [0], roiHist, dst, [0, 180], 1);
// apply camshift to get the new location
[trackBox, trackWindow] = cv.CamShift(dst, trackWindow, termCrit);
// Draw it on image
let pts = cv.rotatedRectPoints(trackBox);
cv.line(frame, pts[0], pts[1], [255, 0, 0, 255], 3);
cv.line(frame, pts[1], pts[2], [255, 0, 0, 255], 3);
cv.line(frame, pts[2], pts[3], [255, 0, 0, 255], 3);
cv.line(frame, pts[3], pts[0], [255, 0, 0, 255], 3);
cv.imshow('canvasOutput', frame);
// schedule the next one.
let delay = 1000/FPS - (Date.now() - begin);
setTimeout(processVideo, delay);
} catch (err) {
utils.printError(err);
}
};
// schedule the first one.
setTimeout(processVideo, 0);
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
let streaming = false;
let videoInput = document.getElementById('videoInput');
let startAndStop = document.getElementById('startAndStop');
let canvasOutput = document.getElementById('canvasOutput');
let canvasContext = canvasOutput.getContext('2d');
startAndStop.addEventListener('click', () => {
if (!streaming) {
utils.clearError();
videoInput.play().then(() => {
onVideoStarted();
});
} else {
videoInput.pause();
videoInput.currentTime = 0;
onVideoStopped();
}
});
function onVideoStarted() {
streaming = true;
startAndStop.innerText = 'Stop';
videoInput.height = videoInput.width * (videoInput.videoHeight / videoInput.videoWidth);
utils.executeCode('codeEditor');
}
function onVideoStopped() {
streaming = false;
canvasContext.clearRect(0, 0, canvasOutput.width, canvasOutput.height);
startAndStop.innerText = 'Start';
}
utils.loadOpenCv(() => {
videoInput.addEventListener('canplay', () => {
startAndStop.removeAttribute('disabled');
});
videoInput.src = 'cup.mp4';
});
</script>
</body>
</html>

View File

@@ -0,0 +1,68 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Image Canny Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Image Canny Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let dst = new cv.Mat();
cv.cvtColor(src, src, cv.COLOR_RGB2GRAY, 0);
// You can try more different parameters
cv.Canny(src, dst, 50, 100, 3, false);
cv.imshow('canvasOutput', dst);
src.delete(); dst.delete();
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('lena.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@@ -0,0 +1,67 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Convert Color Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Convert Color Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let dst = new cv.Mat();
// You can try more different parameters
cv.cvtColor(src, dst, cv.COLOR_RGBA2GRAY, 0);
cv.imshow('canvasOutput', dst);
src.delete(); dst.delete();
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('lena.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@@ -0,0 +1,69 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Image InRange Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Image InRange Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let dst = new cv.Mat();
let low = new cv.Mat(src.rows, src.cols, src.type(), [0, 0, 0, 0]);
let high = new cv.Mat(src.rows, src.cols, src.type(), [150, 150, 150, 255]);
// You can try more different parameters
cv.inRange(src, low, high, dst);
cv.imshow('canvasOutput', dst);
src.delete(); dst.delete(); low.delete(); high.delete();
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('lena.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@@ -0,0 +1,86 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Image ApproxPolyDP Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Image ApproxPolyDP Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let dst = cv.Mat.zeros(src.rows, src.cols, cv.CV_8UC3);
cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0);
cv.threshold(src, src, 100, 200, cv.THRESH_BINARY);
let contours = new cv.MatVector();
let hierarchy = new cv.Mat();
let poly = new cv.MatVector();
cv.findContours(src, contours, hierarchy, cv.RETR_CCOMP, cv.CHAIN_APPROX_SIMPLE);
// approximates each contour to polygon
for (let i = 0; i < contours.size(); ++i) {
let tmp = new cv.Mat();
let cnt = contours.get(i);
// You can try more different parameters
cv.approxPolyDP(cnt, tmp, 3, true);
poly.push_back(tmp);
cnt.delete(); tmp.delete();
}
// draw contours with random Scalar
for (let i = 0; i < contours.size(); ++i) {
let color = new cv.Scalar(Math.round(Math.random() * 255), Math.round(Math.random() * 255),
Math.round(Math.random() * 255));
cv.drawContours(dst, poly, i, color, 1, 8, hierarchy, 0);
}
cv.imshow('canvasOutput', dst);
src.delete(); dst.delete(); hierarchy.delete(); contours.delete(); poly.delete();
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('lena.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@@ -0,0 +1,63 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Image Area Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Image Area Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<div>
<canvas id="canvasInput"></canvas>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</div>
<div>
<p><strong>The area is: </strong><span id="areaOutput"></span></p>
</div>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let dst = cv.Mat.zeros(src.rows, src.cols, cv.CV_8UC3);
cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0);
cv.threshold(src, src, 177, 200, cv.THRESH_BINARY);
let contours = new cv.MatVector();
let hierarchy = new cv.Mat();
cv.findContours(src, contours, hierarchy, cv.RETR_CCOMP, cv.CHAIN_APPROX_SIMPLE);
let cnt = contours.get(20);
// You can try more different parameters
let area = cv.contourArea(cnt, false);
areaOutput.innerHTML = area;
src.delete(); dst.delete(); contours.delete(); hierarchy.delete(); cnt.delete();
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('lena.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@@ -0,0 +1,79 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Bounding Rect Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Bounding Rect Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let dst = cv.Mat.zeros(src.rows, src.cols, cv.CV_8UC3);
cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0);
cv.threshold(src, src, 177, 200, cv.THRESH_BINARY);
let contours = new cv.MatVector();
let hierarchy = new cv.Mat();
cv.findContours(src, contours, hierarchy, cv.RETR_CCOMP, cv.CHAIN_APPROX_SIMPLE);
let cnt = contours.get(0);
// You can try more different parameters
let rect = cv.boundingRect(cnt);
let contoursColor = new cv.Scalar(255, 255, 255);
let rectangleColor = new cv.Scalar(255, 0, 0);
cv.drawContours(dst, contours, 0, contoursColor, 1, 8, hierarchy, 100);
let point1 = new cv.Point(rect.x, rect.y);
let point2 = new cv.Point(rect.x + rect.width, rect.y + rect.height);
cv.rectangle(dst, point1, point2, rectangleColor, 2, cv.LINE_AA, 0);
cv.imshow('canvasOutput', dst);
src.delete(); dst.delete(); contours.delete(); hierarchy.delete(); cnt.delete();
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('shape.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@@ -0,0 +1,86 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Convex Hull Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Convex Hull Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let dst = cv.Mat.zeros(src.rows, src.cols, cv.CV_8UC3);
cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0);
cv.threshold(src, src, 100, 200, cv.THRESH_BINARY);
let contours = new cv.MatVector();
let hierarchy = new cv.Mat();
let hull = new cv.MatVector();
cv.findContours(src, contours, hierarchy, cv.RETR_CCOMP, cv.CHAIN_APPROX_SIMPLE);
// approximates each contour to convex hull
for (let i = 0; i < contours.size(); ++i) {
let tmp = new cv.Mat();
let cnt = contours.get(i);
// You can try more different parameters
cv.convexHull(cnt, tmp, false, true);
hull.push_back(tmp);
cnt.delete(); tmp.delete();
}
// draw contours with random Scalar
for (let i = 0; i < contours.size(); ++i) {
let colorHull = new cv.Scalar(Math.round(Math.random() * 255), Math.round(Math.random() * 255),
Math.round(Math.random() * 255));
cv.drawContours(dst, hull, i, colorHull, 1, 8, hierarchy, 0);
}
cv.imshow('canvasOutput', dst);
src.delete(); dst.delete(); hierarchy.delete(); contours.delete(); hull.delete();
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('lena.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@@ -0,0 +1,77 @@
<!DOCTYPE html>
<!-- OpenCV.js tutorial page: fits an ellipse to the first detected contour
     of a thresholded image and draws it on the output canvas. -->
<html>
<head>
<meta charset="utf-8">
<title>Fit Ellipse Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Fit Ellipse Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<!-- Editable demo code: loaded into the textarea by utils.loadCode and
     executed when the user clicks "Try it". -->
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let dst = cv.Mat.zeros(src.rows, src.cols, cv.CV_8UC3);
cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0);
cv.threshold(src, src, 177, 200, cv.THRESH_BINARY);
let contours = new cv.MatVector();
let hierarchy = new cv.Mat();
cv.findContours(src, contours, hierarchy, cv.RETR_CCOMP, cv.CHAIN_APPROX_SIMPLE);
let cnt = contours.get(0);
// You can try more different parameters
let rotatedRect = cv.fitEllipse(cnt);
let contoursColor = new cv.Scalar(255, 255, 255);
let ellipseColor = new cv.Scalar(255, 0, 0);
cv.drawContours(dst, contours, 0, contoursColor, 1, 8, hierarchy, 100);
cv.ellipse1(dst, rotatedRect, ellipseColor, 1, cv.LINE_8);
cv.imshow('canvasOutput', dst);
src.delete(); dst.delete(); contours.delete(); hierarchy.delete(); cnt.delete();
</script>
<!-- Page wiring: populate the editor, load the default image, and enable
     the "Try it" button once OpenCV.js has finished loading. -->
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('shape.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
    utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
    tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@@ -0,0 +1,86 @@
<!DOCTYPE html>
<!-- OpenCV.js tutorial page: fits a straight line (cv.fitLine) to the first
     detected contour and draws the extrapolated line across the canvas. -->
<html>
<head>
<meta charset="utf-8">
<title>Fit Line Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Fit Line Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<!-- Editable demo code: loaded into the textarea by utils.loadCode and
     executed when the user clicks "Try it". The fitted line is returned as
     [vx, vy, x, y] in a 1x4 CV_32F Mat. -->
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let dst = cv.Mat.zeros(src.rows, src.cols, cv.CV_8UC3);
cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0);
cv.threshold(src, src, 177, 200, cv.THRESH_BINARY);
let contours = new cv.MatVector();
let hierarchy = new cv.Mat();
let line = new cv.Mat();
cv.findContours(src, contours, hierarchy, cv.RETR_CCOMP, cv.CHAIN_APPROX_SIMPLE);
let cnt = contours.get(0);
// You can try more different parameters
cv.fitLine(cnt, line, cv.DIST_L2, 0, 0.01, 0.01);
let contoursColor = new cv.Scalar(255, 255, 255);
let lineColor = new cv.Scalar(255, 0, 0);
cv.drawContours(dst, contours, 0, contoursColor, 1, 8, hierarchy, 100);
let vx = line.data32F[0];
let vy = line.data32F[1];
let x = line.data32F[2];
let y = line.data32F[3];
let lefty = Math.round((-x * vy / vx) + y);
let righty = Math.round(((src.cols - x) * vy / vx) + y);
let point1 = new cv.Point(src.cols - 1, righty);
let point2 = new cv.Point(0, lefty);
cv.line(dst, point1, point2, lineColor, 2, cv.LINE_AA, 0);
cv.imshow('canvasOutput', dst);
src.delete(); dst.delete(); contours.delete(); hierarchy.delete(); line.delete(); cnt.delete();
</script>
<!-- Page wiring: populate the editor, load the default image, and enable
     the "Try it" button once OpenCV.js has finished loading. -->
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('shape.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
    utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
    tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@@ -0,0 +1,81 @@
<!DOCTYPE html>
<!-- OpenCV.js tutorial page: computes the minimum-area rotated rectangle
     (cv.minAreaRect) around the first detected contour and draws its four
     edges on the output canvas. -->
<html>
<head>
<meta charset="utf-8">
<title>Min Area Rect Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Min Area Rect Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<!-- Editable demo code: loaded into the textarea by utils.loadCode and
     executed when the user clicks "Try it". -->
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let dst = cv.Mat.zeros(src.rows, src.cols, cv.CV_8UC3);
cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0);
cv.threshold(src, src, 177, 200, cv.THRESH_BINARY);
let contours = new cv.MatVector();
let hierarchy = new cv.Mat();
cv.findContours(src, contours, hierarchy, cv.RETR_CCOMP, cv.CHAIN_APPROX_SIMPLE);
let cnt = contours.get(0);
// You can try more different parameters
let rotatedRect = cv.minAreaRect(cnt);
let vertices = cv.RotatedRect.points(rotatedRect);
let contoursColor = new cv.Scalar(255, 255, 255);
let rectangleColor = new cv.Scalar(255, 0, 0);
cv.drawContours(dst, contours, 0, contoursColor, 1, 8, hierarchy, 100);
// draw rotatedRect
for (let i = 0; i < 4; i++) {
    cv.line(dst, vertices[i], vertices[(i + 1) % 4], rectangleColor, 2, cv.LINE_AA, 0);
}
cv.imshow('canvasOutput', dst);
src.delete(); dst.delete(); contours.delete(); hierarchy.delete(); cnt.delete();
</script>
<!-- Page wiring: populate the editor, load the default image, and enable
     the "Try it" button once OpenCV.js has finished loading. -->
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('shape.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
    utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
    tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@@ -0,0 +1,77 @@
<!DOCTYPE html>
<!-- OpenCV.js tutorial page: computes the minimum enclosing circle
     (cv.minEnclosingCircle) of the first detected contour and draws it. -->
<html>
<head>
<meta charset="utf-8">
<title>Min Enclosing Circle Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Min Enclosing Circle Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<!-- Editable demo code: loaded into the textarea by utils.loadCode and
     executed when the user clicks "Try it". cv.minEnclosingCircle returns
     a plain object with `center` and `radius` fields. -->
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let dst = cv.Mat.zeros(src.rows, src.cols, cv.CV_8UC3);
cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0);
cv.threshold(src, src, 177, 200, cv.THRESH_BINARY);
let contours = new cv.MatVector();
let hierarchy = new cv.Mat();
cv.findContours(src, contours, hierarchy, cv.RETR_CCOMP, cv.CHAIN_APPROX_SIMPLE);
let cnt = contours.get(0);
// You can try more different parameters
let circle = cv.minEnclosingCircle(cnt);
let contoursColor = new cv.Scalar(255, 255, 255);
let circleColor = new cv.Scalar(255, 0, 0);
cv.drawContours(dst, contours, 0, contoursColor, 1, 8, hierarchy, 100);
cv.circle(dst, circle.center, circle.radius, circleColor);
cv.imshow('canvasOutput', dst);
src.delete(); dst.delete(); contours.delete(); hierarchy.delete(); cnt.delete();
</script>
<!-- Page wiring: populate the editor, load the default image, and enable
     the "Try it" button once OpenCV.js has finished loading. -->
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('shape.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
    utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
    tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@@ -0,0 +1,62 @@
<!DOCTYPE html>
<!-- OpenCV.js tutorial page: computes image moments (cv.moments) of the
     first detected contour and prints the zeroth moment m00 (the area). -->
<html>
<head>
<meta charset="utf-8">
<title>Image Moments Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Image Moments Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<div>
<canvas id="canvasInput"></canvas>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</div>
<div>
<p><strong>The m00 is: </strong><span id="momentsOutput"></span></p>
</div>
</div>
<script src="utils.js" type="text/javascript"></script>
<!-- Editable demo code: loaded into the textarea by utils.loadCode and
     executed when the user clicks "Try it". -->
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let dst = cv.Mat.zeros(src.rows, src.cols, cv.CV_8UC3);
cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0);
cv.threshold(src, src, 177, 200, cv.THRESH_BINARY);
let contours = new cv.MatVector();
let hierarchy = new cv.Mat();
cv.findContours(src, contours, hierarchy, cv.RETR_CCOMP, cv.CHAIN_APPROX_SIMPLE);
let cnt = contours.get(0);
// You can try more different parameters
let Moments = cv.moments(cnt, false);
momentsOutput.innerHTML = Moments.m00;
// cnt is a Mat handle returned by MatVector.get() and must be freed too
// (the original snippet leaked it).
src.delete(); dst.delete(); contours.delete(); hierarchy.delete(); cnt.delete();
</script>
<!-- Page wiring: populate the editor, load the default image, and enable
     the "Try it" button once OpenCV.js has finished loading. -->
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('lena.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
    utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
    tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@@ -0,0 +1,62 @@
<!DOCTYPE html>
<!-- OpenCV.js tutorial page: computes the arc length (cv.arcLength) of one
     detected contour and prints the perimeter value. -->
<html>
<head>
<meta charset="utf-8">
<title>Image Perimeter Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Image Perimeter Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<div>
<canvas id="canvasInput"></canvas>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</div>
<div>
<p><strong>The perimeter is: </strong><span id="perimeterOutput"></span></p>
</div>
</div>
<script src="utils.js" type="text/javascript"></script>
<!-- Editable demo code: loaded into the textarea by utils.loadCode and
     executed when the user clicks "Try it". NOTE(review): contours.get(20)
     assumes the default lena.jpg threshold yields at least 21 contours; a
     user-supplied image with fewer contours will make the snippet fail. -->
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let dst = cv.Mat.zeros(src.rows, src.cols, cv.CV_8UC3);
cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0);
cv.threshold(src, src, 177, 200, cv.THRESH_BINARY);
let contours = new cv.MatVector();
let hierarchy = new cv.Mat();
cv.findContours(src, contours, hierarchy, cv.RETR_CCOMP, cv.CHAIN_APPROX_SIMPLE);
let cnt = contours.get(20);
// You can try more different parameters
let perimeter = cv.arcLength(cnt, true);
perimeterOutput.innerHTML = perimeter;
src.delete(); dst.delete(); contours.delete(); hierarchy.delete(); cnt.delete();
</script>
<!-- Page wiring: populate the editor, load the default image, and enable
     the "Try it" button once OpenCV.js has finished loading. -->
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('lena.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
    utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
    tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@@ -0,0 +1,68 @@
<!DOCTYPE html>
<!-- OpenCV.js tutorial page: transposes a thresholded grayscale image
     (cv.transpose) and shows the result. -->
<html>
<head>
<meta charset="utf-8">
<title>Image Transpose Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Image Transpose Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<!-- Editable demo code: loaded into the textarea by utils.loadCode and
     executed when the user clicks "Try it". -->
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let dst = new cv.Mat();
cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0);
cv.threshold(src, src, 120, 200, cv.THRESH_BINARY);
cv.transpose(src, dst);
cv.imshow('canvasOutput', dst);
src.delete(); dst.delete();
</script>
<!-- Page wiring: populate the editor, load the default image, and enable
     the "Try it" button once OpenCV.js has finished loading. -->
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('lena.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
    utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
    tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@@ -0,0 +1,77 @@
<!DOCTYPE html>
<!-- OpenCV.js tutorial page: finds all contours (cv.findContours) in a
     thresholded image and draws each one in a random color. -->
<html>
<head>
<meta charset="utf-8">
<title>Image Contours Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Image Contours Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<!-- Editable demo code: loaded into the textarea by utils.loadCode and
     executed when the user clicks "Try it". -->
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let dst = cv.Mat.zeros(src.rows, src.cols, cv.CV_8UC3);
cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0);
cv.threshold(src, src, 120, 200, cv.THRESH_BINARY);
let contours = new cv.MatVector();
let hierarchy = new cv.Mat();
// You can try more different parameters
cv.findContours(src, contours, hierarchy, cv.RETR_CCOMP, cv.CHAIN_APPROX_SIMPLE);
// draw contours with random Scalar
for (let i = 0; i < contours.size(); ++i) {
    let color = new cv.Scalar(Math.round(Math.random() * 255), Math.round(Math.random() * 255),
                              Math.round(Math.random() * 255));
    cv.drawContours(dst, contours, i, color, 1, cv.LINE_8, hierarchy, 100);
}
cv.imshow('canvasOutput', dst);
src.delete(); dst.delete(); contours.delete(); hierarchy.delete();
</script>
<!-- Page wiring: populate the editor, load the default image, and enable
     the "Try it" button once OpenCV.js has finished loading. -->
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('lena.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
    utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
    tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@@ -0,0 +1,87 @@
<!DOCTYPE html>
<!-- OpenCV.js tutorial page: computes the convex hull of the first contour
     and visualizes its convexity defects (cv.convexityDefects). -->
<html>
<head>
<meta charset="utf-8">
<title>Convexity Defects Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Convexity Defects Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<!-- Editable demo code: loaded into the textarea by utils.loadCode and
     executed when the user clicks "Try it". Each row of `defect` holds
     four CV_32S values: start index, end index, farthest-point index,
     and (unused here) the fixed-point depth. -->
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let dst = cv.Mat.zeros(src.rows, src.cols, cv.CV_8UC3);
cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0);
cv.threshold(src, src, 100, 200, cv.THRESH_BINARY);
let contours = new cv.MatVector();
let hierarchy = new cv.Mat();
cv.findContours(src, contours, hierarchy, cv.RETR_CCOMP, cv.CHAIN_APPROX_SIMPLE);
let hull = new cv.Mat();
let defect = new cv.Mat();
let cnt = contours.get(0);
let lineColor = new cv.Scalar(255, 0, 0);
let circleColor = new cv.Scalar(255, 255, 255);
cv.convexHull(cnt, hull, false, false);
cv.convexityDefects(cnt, hull, defect);
for (let i = 0; i < defect.rows; ++i) {
    let start = new cv.Point(cnt.data32S[defect.data32S[i * 4] * 2],
                             cnt.data32S[defect.data32S[i * 4] * 2 + 1]);
    let end = new cv.Point(cnt.data32S[defect.data32S[i * 4 + 1] * 2],
                           cnt.data32S[defect.data32S[i * 4 + 1] * 2 + 1]);
    let far = new cv.Point(cnt.data32S[defect.data32S[i * 4 + 2] * 2],
                           cnt.data32S[defect.data32S[i * 4 + 2] * 2 + 1]);
    cv.line(dst, start, end, lineColor, 2, cv.LINE_AA, 0);
    cv.circle(dst, far, 3, circleColor, -1);
}
cv.imshow('canvasOutput', dst);
// cnt is a Mat handle returned by MatVector.get() and must be freed too
// (the original snippet leaked it).
src.delete(); dst.delete(); hierarchy.delete(); contours.delete(); hull.delete(); defect.delete(); cnt.delete();
</script>
<!-- Page wiring: populate the editor, load the default image, and enable
     the "Try it" button once OpenCV.js has finished loading. -->
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('shape.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
    utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
    tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@@ -0,0 +1,82 @@
<!DOCTYPE html>
<!-- OpenCV.js tutorial page: compares two contours with cv.matchShapes and
     prints the similarity score (lower means more similar). -->
<html>
<head>
<meta charset="utf-8">
<title>Match Shape Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Match Shape Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<div>
<p><strong>The result is: </strong><span id="matchShapesOutput"></span></p>
</div>
<script src="utils.js" type="text/javascript"></script>
<!-- Editable demo code: loaded into the textarea by utils.loadCode and
     executed when the user clicks "Try it". -->
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let dst = cv.Mat.zeros(src.rows, src.cols, cv.CV_8UC3);
cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0);
cv.threshold(src, src, 177, 200, cv.THRESH_BINARY);
let contours = new cv.MatVector();
let hierarchy = new cv.Mat();
cv.findContours(src, contours, hierarchy, cv.RETR_CCOMP, cv.CHAIN_APPROX_SIMPLE);
let contourID0 = 10;
let contourID1 = 5;
let color0 = new cv.Scalar(255, 0, 0);
let color1 = new cv.Scalar(0, 0, 255);
// You can try more different parameters
// Keep the Mat handles returned by MatVector.get() so they can be freed
// afterwards (the original snippet leaked them).
let cnt0 = contours.get(contourID0);
let cnt1 = contours.get(contourID1);
let result = cv.matchShapes(cnt0, cnt1, 1, 0);
matchShapesOutput.innerHTML = result;
cv.drawContours(dst, contours, contourID0, color0, 1, cv.LINE_8, hierarchy, 100);
cv.drawContours(dst, contours, contourID1, color1, 1, cv.LINE_8, hierarchy, 100);
cv.imshow('canvasOutput', dst);
src.delete(); dst.delete(); contours.delete(); hierarchy.delete(); cnt0.delete(); cnt1.delete();
</script>
<!-- Page wiring: populate the editor, load the default image, and enable
     the "Try it" button once OpenCV.js has finished loading. -->
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('coins.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
    utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
    tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@@ -0,0 +1,119 @@
getBlobFromImage = function(inputSize, mean, std, swapRB, image) {
    // Build a DNN input blob from either a canvas id (string) or an
    // existing cv.Mat. `std` is passed as the blobFromImage scale factor;
    // `mean` is a 3-element array subtracted per channel.
    let mat;
    let ownsMat = false;
    if (typeof(image) === 'string') {
        mat = cv.imread(image);
        ownsMat = true;  // we created this Mat, so we must free it below
    } else {
        mat = image;     // caller-owned; do not delete
    }
    let matC3 = new cv.Mat(mat.matSize[0], mat.matSize[1], cv.CV_8UC3);
    cv.cvtColor(mat, matC3, cv.COLOR_RGBA2BGR);
    let input = cv.blobFromImage(matC3, std, new cv.Size(inputSize[0], inputSize[1]),
                                 new cv.Scalar(mean[0], mean[1], mean[2]), swapRB);
    matC3.delete();
    // The original version leaked the Mat read from a canvas id.
    if (ownsMat) {
        mat.delete();
    }
    return input;
}
loadLables = async function(labelsUrl) {
    // Fetch a newline-separated labels file and return it as an array of
    // strings. (The name keeps the historical "Lables" spelling because
    // existing callers use it.)
    let response = await fetch(labelsUrl);
    let label = await response.text();
    // Split on \r?\n so Windows-style (CRLF) label files do not leave a
    // stray '\r' at the end of every label.
    label = label.split(/\r?\n/);
    return label;
}
loadModel = async function(e) {
    // Read the user-selected model file (from a file <input> change event)
    // and register its bytes in the Emscripten virtual filesystem so
    // OpenCV.js can later open it by name. Resolves with the file name.
    return new Promise((resolve, reject) => {
        let file = e.target.files[0];
        let path = file.name;
        let reader = new FileReader();
        reader.readAsArrayBuffer(file);
        reader.onload = function(ev) {
            if (reader.readyState === 2) {  // DONE
                let buffer = reader.result;
                let data = new Uint8Array(buffer);
                cv.FS_createDataFile('/', path, data, true, false, false);
                resolve(path);
            }
        }
        // Propagate read failures; the original left the promise pending
        // forever if the FileReader errored.
        reader.onerror = () => reject(reader.error);
    });
}
getTopClasses = function(probs, labels, topK = 3) {
probs = Array.from(probs);
let indexes = probs.map((prob, index) => [prob, index]);
let sorted = indexes.sort((a, b) => {
if (a[0] === b[0]) {return 0;}
return a[0] < b[0] ? -1 : 1;
});
sorted.reverse();
let classes = [];
for (let i = 0; i < topK; ++i) {
let prob = sorted[i][0];
let index = sorted[i][1];
let c = {
label: labels[index],
prob: (prob * 100).toFixed(2)
}
classes.push(c);
}
return classes;
}
loadImageToCanvas = function(e, canvasId) {
    // Draw the user-selected image file (from a file <input> change event)
    // onto the canvas with the given id, scaled to the canvas size.
    let files = e.target.files;
    let imgUrl = URL.createObjectURL(files[0]);
    let canvas = document.getElementById(canvasId);
    let ctx = canvas.getContext('2d');
    let img = new Image();
    img.crossOrigin = 'anonymous';
    img.src = imgUrl;
    img.onload = function() {
        ctx.drawImage(img, 0, 0, canvas.width, canvas.height);
        // Release the blob URL once drawn; the original leaked one object
        // URL per selected file.
        URL.revokeObjectURL(imgUrl);
    };
}
drawInfoTable = async function(jsonUrl, divId) {
    // Fetch a model-zoo description (JSON keyed by model family, each value
    // an array of model records) and append one heading + table per family
    // to the element with id `divId`.
    let response = await fetch(jsonUrl);
    let json = await response.json();
    let appendix = document.getElementById(divId);
    // `const` keeps the loop variables block-scoped; the original used bare
    // `for (key of ...)`, leaking implicit globals and throwing a
    // ReferenceError under strict mode.
    for (const key of Object.keys(json)) {
        let h3 = document.createElement('h3');
        h3.textContent = key + " model";
        appendix.appendChild(h3);
        let table = document.createElement('table');
        // Header row built from the keys of the first record.
        let head_tr = document.createElement('tr');
        for (const head of Object.keys(json[key][0])) {
            let th = document.createElement('th');
            th.textContent = head;
            th.style.border = "1px solid black";
            head_tr.appendChild(th);
        }
        table.appendChild(head_tr);
        for (const model of json[key]) {
            let tr = document.createElement('tr');
            for (const params of Object.keys(model)) {
                let td = document.createElement('td');
                td.style.border = "1px solid black";
                if (params !== "modelUrl" && params !== "configUrl" && params !== "labelsUrl") {
                    // Plain value cell.
                    td.textContent = model[params];
                    tr.appendChild(td);
                } else {
                    // URL-valued fields are rendered as a "link" anchor.
                    let a = document.createElement('a');
                    let link = document.createTextNode('link');
                    a.append(link);
                    a.href = model[params];
                    td.appendChild(a);
                    tr.appendChild(td);
                }
            }
            table.appendChild(tr);
        }
        table.style.width = "800px";
        table.style.borderCollapse = "collapse";
        appendix.appendChild(table);
    }
}

View File

@@ -0,0 +1,70 @@
/* Shared stylesheet for the OpenCV.js tutorial example pages. */

/* Base typography. */
body, div, p {
font: 400 14px/22px Roboto,sans-serif;
}
/* Outline every media surface used by the demos. */
canvas, img, video {
border: 1px solid black;
}
td {
padding: 10px 0px 0px 10px;
text-align: center;
}
/* "Try it" / "Start" buttons, styled like Bootstrap primary buttons. */
button {
display: inline-block;
color: #fff;
background-color: #337ab7;
border-color: #2e6da4;
padding: 6px 12px;
margin-bottom: 0;
font-size: 14px;
font-weight: bold;
text-align: center;
white-space: nowrap;
vertical-align: middle;
-ms-touch-action: manipulation;
touch-action: manipulation;
cursor: pointer;
-webkit-user-select: none;
-moz-user-select: none;
-ms-user-select: none;
user-select: none;
background-image: none;
border: 1px solid transparent;
border-radius: 4px;
}
/* Dimmed state while OpenCV.js is still loading. */
button[disabled] {
cursor: not-allowed;
filter: alpha(opacity=65);
-webkit-box-shadow: none;
box-shadow: none;
opacity: .65;
}
.control {
margin-bottom: 3px;
}
/* Error message area under the code editor. */
.err {
color: red;
font-weight: bold;
}
.caption {
margin: 0;
font-weight: bold;
}
/* The editable code textarea. The original rule declared `margin` and
   part of `padding` twice; the overridden (dead) declarations were
   consolidated into the equivalent shorthand values below. */
.code {
background-color: #FBFCFD;
border: 1px solid #C4CFE5;
font-family: monospace, fixed;
font-size: 13px;
min-height: 13px;
line-height: 1.0;
text-wrap: unrestricted; /* non-standard value; ignored by browsers */
padding: 4px 6px 0px;
margin: 0px;
}
.hidden {
display: none;
}
.small {
max-width: 300px;
}

View File

@@ -0,0 +1,100 @@
<!DOCTYPE html>
<!-- OpenCV.js tutorial page: detects faces with a Haar cascade, then
     detects eyes inside each face ROI, drawing rectangles for both. -->
<html>
<head>
<meta charset="utf-8">
<title>Face Detection Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Face Detection Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<!-- Editable demo code: loaded into the textarea by utils.loadCode and
     executed when the user clicks "Try it". The cascade XML files are
     fetched into the virtual filesystem by the wiring script below. -->
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let gray = new cv.Mat();
cv.cvtColor(src, gray, cv.COLOR_RGBA2GRAY, 0);
let faces = new cv.RectVector();
let eyes = new cv.RectVector();
let faceCascade = new cv.CascadeClassifier();
let eyeCascade = new cv.CascadeClassifier();
// load pre-trained classifiers
faceCascade.load('haarcascade_frontalface_default.xml');
eyeCascade.load('haarcascade_eye.xml');
// detect faces
let msize = new cv.Size(0, 0);
faceCascade.detectMultiScale(gray, faces, 1.1, 3, 0, msize, msize);
for (let i = 0; i < faces.size(); ++i) {
    let roiGray = gray.roi(faces.get(i));
    let roiSrc = src.roi(faces.get(i));
    let point1 = new cv.Point(faces.get(i).x, faces.get(i).y);
    let point2 = new cv.Point(faces.get(i).x + faces.get(i).width,
                              faces.get(i).y + faces.get(i).height);
    cv.rectangle(src, point1, point2, [255, 0, 0, 255]);
    // detect eyes in face ROI
    eyeCascade.detectMultiScale(roiGray, eyes);
    for (let j = 0; j < eyes.size(); ++j) {
        let point1 = new cv.Point(eyes.get(j).x, eyes.get(j).y);
        let point2 = new cv.Point(eyes.get(j).x + eyes.get(j).width,
                                  eyes.get(j).y + eyes.get(j).height);
        cv.rectangle(roiSrc, point1, point2, [0, 0, 255, 255]);
    }
    roiGray.delete(); roiSrc.delete();
}
cv.imshow('canvasOutput', src);
src.delete(); gray.delete(); faceCascade.delete();
eyeCascade.delete(); faces.delete(); eyes.delete();
</script>
<!-- Page wiring: populate the editor, load the default image, fetch both
     cascade XML files into the Emscripten filesystem, and only then enable
     the "Try it" button. -->
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('lena.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
    utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
    let eyeCascadeFile = 'haarcascade_eye.xml';
    utils.createFileFromUrl(eyeCascadeFile, eyeCascadeFile, () => {
        let faceCascadeFile = 'haarcascade_frontalface_default.xml';
        utils.createFileFromUrl(faceCascadeFile, faceCascadeFile, () => {
            tryIt.removeAttribute('disabled');
        });
    });
});
</script>
</body>
</html>

View File

@@ -0,0 +1,142 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Face Detection Camera Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Face Detection Camera Example</h2>
<p>
Click <b>Start/Stop</b> button to start or stop the camera capture.<br>
The <b>videoInput</b> is a &lt;video&gt; element used as face detector input.
The <b>canvasOutput</b> is a &lt;canvas&gt; element used as face detector output.<br>
The code of &lt;textarea&gt; will be executed when video is started.
You can modify the code to investigate more.
</p>
<div>
<div class="control"><button id="startAndStop" disabled>Start</button></div>
<textarea class="code" rows="29" cols="80" id="codeEditor" spellcheck="false">
</textarea>
</div>
<p class="err" id="errorMessage"></p>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<video id="videoInput" width=320 height=240></video>
</td>
<td>
<canvas id="canvasOutput" width=320 height=240></canvas>
</td>
<td></td>
<td></td>
</tr>
<tr>
<td>
<div class="caption">videoInput</div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
<td></td>
<td></td>
</tr>
</table>
</div>
<script src="https://webrtc.github.io/adapter/adapter-5.0.4.js" type="text/javascript"></script>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
// Continuously grab frames from the <video> element, run Haar face
// detection on each, and draw the results to the output canvas at ~30 FPS.
let video = document.getElementById('videoInput');
let src = new cv.Mat(video.height, video.width, cv.CV_8UC4);
let dst = new cv.Mat(video.height, video.width, cv.CV_8UC4);
let gray = new cv.Mat();
let cap = new cv.VideoCapture(video);
let faces = new cv.RectVector();
let classifier = new cv.CascadeClassifier();
// load pre-trained classifiers
classifier.load('haarcascade_frontalface_default.xml');
const FPS = 30;
function processVideo() {
try {
// `streaming` is maintained by the page script; when the user stops
// the camera we release all Mats and end the processing loop.
if (!streaming) {
// clean and stop.
src.delete();
dst.delete();
gray.delete();
faces.delete();
classifier.delete();
return;
}
let begin = Date.now();
// start processing.
cap.read(src);
src.copyTo(dst);
cv.cvtColor(dst, gray, cv.COLOR_RGBA2GRAY, 0);
// detect faces.
classifier.detectMultiScale(gray, faces, 1.1, 3, 0);
// draw faces.
for (let i = 0; i < faces.size(); ++i) {
let face = faces.get(i);
let point1 = new cv.Point(face.x, face.y);
let point2 = new cv.Point(face.x + face.width, face.y + face.height);
cv.rectangle(dst, point1, point2, [255, 0, 0, 255]);
}
cv.imshow('canvasOutput', dst);
// schedule the next one, compensating for the time this frame took.
let delay = 1000/FPS - (Date.now() - begin);
setTimeout(processVideo, delay);
} catch (err) {
utils.printError(err);
}
};
// schedule the first one.
setTimeout(processVideo, 0);
</script>
<script type="text/javascript">
// Page wiring for the camera demo: toggle camera capture with one button
// and run the editor code once video playback has started.
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
// shared flag read by the processVideo loop in the code snippet
let streaming = false;
let videoInput = document.getElementById('videoInput');
let startAndStop = document.getElementById('startAndStop');
let canvasOutput = document.getElementById('canvasOutput');
let canvasContext = canvasOutput.getContext('2d');
startAndStop.addEventListener('click', () => {
if (!streaming) {
utils.clearError();
utils.startCamera('qvga', onVideoStarted, 'videoInput');
} else {
utils.stopCamera();
onVideoStopped();
}
});
function onVideoStarted() {
streaming = true;
startAndStop.innerText = 'Stop';
// size the element to the actual camera resolution before processing
videoInput.width = videoInput.videoWidth;
videoInput.height = videoInput.videoHeight;
utils.executeCode('codeEditor');
}
function onVideoStopped() {
streaming = false;
canvasContext.clearRect(0, 0, canvasOutput.width, canvasOutput.height);
startAndStop.innerText = 'Start';
}
utils.loadOpenCv(() => {
// the cascade file must exist in the Emscripten FS before detection runs
let faceCascadeFile = 'haarcascade_frontalface_default.xml';
utils.createFileFromUrl(faceCascadeFile, faceCascadeFile, () => {
startAndStop.removeAttribute('disabled');
});
});
</script>
</body>
</html>

View File

@@ -0,0 +1,68 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Gaussian Blur Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Gaussian Blur Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
// Smooth the input canvas with a 3x3 Gaussian kernel and display the result.
let input = cv.imread('canvasInput');
let blurred = new cv.Mat();
let kernelSize = new cv.Size(3, 3);
// sigmaX = sigmaY = 0: OpenCV derives the sigmas from the kernel size.
cv.GaussianBlur(input, blurred, kernelSize, 0, 0, cv.BORDER_DEFAULT);
cv.imshow('canvasOutput', blurred);
// Mats wrap WASM heap memory; release them explicitly.
input.delete();
blurred.delete();
</script>
<script type="text/javascript">
// Page wiring: load the sample code, default image, and OpenCV.js,
// then enable the "Try it" button once the runtime is ready.
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('lena.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
// run whatever is currently in the editor on each click
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
// the button stays disabled until OpenCV.js has finished loading
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@@ -0,0 +1,68 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Bilateral Filter Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Bilateral Filter Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
// Apply an edge-preserving bilateral filter to the input canvas.
let image = cv.imread('canvasInput');
let filtered = new cv.Mat();
// bilateralFilter expects a 1- or 3-channel image, so drop the alpha channel.
cv.cvtColor(image, image, cv.COLOR_RGBA2RGB, 0);
// d = 9, sigmaColor = sigmaSpace = 75; tweak these to see the effect.
cv.bilateralFilter(image, filtered, 9, 75, 75, cv.BORDER_DEFAULT);
cv.imshow('canvasOutput', filtered);
// free the underlying WASM memory
image.delete();
filtered.delete();
</script>
<script type="text/javascript">
// Page wiring: load the sample code, default image, and OpenCV.js,
// then enable the "Try it" button once the runtime is ready.
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('lena.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
// run whatever is currently in the editor on each click
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
// the button stays disabled until OpenCV.js has finished loading
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@@ -0,0 +1,70 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Image Blur Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Image Blur Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
// Box-average the input canvas with a 3x3 kernel and display the result.
let input = cv.imread('canvasInput');
let smoothed = new cv.Mat();
let kernelSize = new cv.Size(3, 3);
// anchor (-1, -1) means the kernel center
let kernelAnchor = new cv.Point(-1, -1);
cv.blur(input, smoothed, kernelSize, kernelAnchor, cv.BORDER_DEFAULT);
// Equivalent un-normalized form:
// cv.boxFilter(input, smoothed, -1, kernelSize, kernelAnchor, true, cv.BORDER_DEFAULT)
cv.imshow('canvasOutput', smoothed);
// free the underlying WASM memory
input.delete();
smoothed.delete();
</script>
<script type="text/javascript">
// Page wiring: load the sample code, default image, and OpenCV.js,
// then enable the "Try it" button once the runtime is ready.
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('lena.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
// run whatever is currently in the editor on each click
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
// the button stays disabled until OpenCV.js has finished loading
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@@ -0,0 +1,69 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Image Filter Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Image Filter Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
// Convolve the input canvas with a custom kernel via filter2D.
let input = cv.imread('canvasInput');
let output = new cv.Mat();
// 3x3 identity kernel — the output equals the input; edit it to experiment.
let kernel = cv.Mat.eye(3, 3, cv.CV_32FC1);
// anchor (-1, -1) means the kernel center; delta 0 adds nothing to the result
let kernelAnchor = new cv.Point(-1, -1);
cv.filter2D(input, output, cv.CV_8U, kernel, kernelAnchor, 0, cv.BORDER_DEFAULT);
cv.imshow('canvasOutput', output);
// free the underlying WASM memory
input.delete();
output.delete();
kernel.delete();
</script>
<script type="text/javascript">
// Page wiring: load the sample code, default image, and OpenCV.js,
// then enable the "Try it" button once the runtime is ready.
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('lena.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
// run whatever is currently in the editor on each click
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
// the button stays disabled until OpenCV.js has finished loading
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@@ -0,0 +1,67 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Median Blur Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Median Blur Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
// Apply a median filter with a 5x5 aperture to the input canvas.
let input = cv.imread('canvasInput');
let output = new cv.Mat();
// aperture size must be odd and > 1; try 3, 5, 7, ...
cv.medianBlur(input, output, 5);
cv.imshow('canvasOutput', output);
// free the underlying WASM memory
input.delete();
output.delete();
</script>
<script type="text/javascript">
// Page wiring: load the sample code, default image, and OpenCV.js,
// then enable the "Try it" button once the runtime is ready.
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('lena.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
// run whatever is currently in the editor on each click
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
// the button stays disabled until OpenCV.js has finished loading
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@@ -0,0 +1,128 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Image DFT Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Image DFT Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
// Compute the 2D DFT of the (grayscale) input image and display the
// log-scaled magnitude spectrum with the zero frequency at the center.
let src = cv.imread('canvasInput');
cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0);
// get optimal size of DFT (padding to these sizes speeds up cv.dft)
let optimalRows = cv.getOptimalDFTSize(src.rows);
let optimalCols = cv.getOptimalDFTSize(src.cols);
let s0 = cv.Scalar.all(0);
let padded = new cv.Mat();
cv.copyMakeBorder(src, padded, 0, optimalRows - src.rows, 0,
optimalCols - src.cols, cv.BORDER_CONSTANT, s0);
// use cv.MatVector to distribute space for real part and imaginary part
let plane0 = new cv.Mat();
padded.convertTo(plane0, cv.CV_32F);
let planes = new cv.MatVector();
let complexI = new cv.Mat();
let plane1 = new cv.Mat.zeros(padded.rows, padded.cols, cv.CV_32F);
planes.push_back(plane0);
planes.push_back(plane1);
// merge real + imaginary planes into one 2-channel complex Mat
cv.merge(planes, complexI);
// in-place dft transform
cv.dft(complexI, complexI);
// compute log(1 + sqrt(Re(DFT(img))**2 + Im(DFT(img))**2))
cv.split(complexI, planes);
cv.magnitude(planes.get(0), planes.get(1), planes.get(0));
let mag = planes.get(0);
let m1 = new cv.Mat.ones(mag.rows, mag.cols, mag.type());
cv.add(mag, m1, mag);
cv.log(mag, mag);
// crop the spectrum, if it has an odd number of rows or columns
// (x & -2 clears the lowest bit, i.e. rounds down to an even number)
let rect = new cv.Rect(0, 0, mag.cols & -2, mag.rows & -2);
mag = mag.roi(rect);
// rearrange the quadrants of Fourier image
// so that the origin is at the image center
let cx = mag.cols / 2;
let cy = mag.rows / 2;
let tmp = new cv.Mat();
let rect0 = new cv.Rect(0, 0, cx, cy);
let rect1 = new cv.Rect(cx, 0, cx, cy);
let rect2 = new cv.Rect(0, cy, cx, cy);
let rect3 = new cv.Rect(cx, cy, cx, cy);
// the roi() views alias `mag`, so copies below shuffle mag in place
let q0 = mag.roi(rect0);
let q1 = mag.roi(rect1);
let q2 = mag.roi(rect2);
let q3 = mag.roi(rect3);
// exchange 1 and 4 quadrants
q0.copyTo(tmp);
q3.copyTo(q0);
tmp.copyTo(q3);
// exchange 2 and 3 quadrants
q1.copyTo(tmp);
q2.copyTo(q1);
tmp.copyTo(q2);
// Scale the cv.CV_32F magnitude image into the displayable [0, 1] range.
cv.normalize(mag, mag, 0, 1, cv.NORM_MINMAX);
cv.imshow('canvasOutput', mag);
src.delete(); padded.delete(); planes.delete(); complexI.delete(); m1.delete(); tmp.delete();
</script>
<script type="text/javascript">
// Page wiring: load the sample code, default image, and OpenCV.js,
// then enable the "Try it" button once the runtime is ready.
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('lena.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
// run whatever is currently in the editor on each click
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
// the button stays disabled until OpenCV.js has finished loading
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@@ -0,0 +1,74 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Get Affine Transform Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Get Affine Transform Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
// Compute an affine transform from three point correspondences and warp
// the input canvas with it.
let src = cv.imread('canvasInput');
let dst = new cv.Mat();
// (data32F[0], data32F[1]) is the first point
// (data32F[2], data32F[3]) is the second point
// (data32F[4], data32F[5]) is the third point
let srcTri = cv.matFromArray(3, 1, cv.CV_32FC2, [0, 0, 0, 1, 1, 0]);
let dstTri = cv.matFromArray(3, 1, cv.CV_32FC2, [0.6, 0.2, 0.1, 1.3, 1.5, 0.3]);
// cv.Size is (width, height): width = cols, height = rows.
let dsize = new cv.Size(src.cols, src.rows);
let M = cv.getAffineTransform(srcTri, dstTri);
// You can try more different parameters
cv.warpAffine(src, dst, M, dsize, cv.INTER_LINEAR, cv.BORDER_CONSTANT, new cv.Scalar());
cv.imshow('canvasOutput', dst);
// free the underlying WASM memory
src.delete(); dst.delete(); M.delete(); srcTri.delete(); dstTri.delete();
</script>
<script type="text/javascript">
// Page wiring: load the sample code, default image, and OpenCV.js,
// then enable the "Try it" button once the runtime is ready.
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('lena.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
// run whatever is currently in the editor on each click
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
// the button stays disabled until OpenCV.js has finished loading
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@@ -0,0 +1,68 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Image Resize Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Image Resize Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
// Resize the input canvas to a fixed 300x300 using area interpolation.
let input = cv.imread('canvasInput');
let resized = new cv.Mat();
let targetSize = new cv.Size(300, 300);
// fx = fy = 0: the scale factors are derived from targetSize.
// INTER_AREA is the recommended mode for shrinking.
cv.resize(input, resized, targetSize, 0, 0, cv.INTER_AREA);
cv.imshow('canvasOutput', resized);
// free the underlying WASM memory
input.delete();
resized.delete();
</script>
<script type="text/javascript">
// Page wiring: load the sample code, default image, and OpenCV.js,
// then enable the "Try it" button once the runtime is ready.
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('lena.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
// run whatever is currently in the editor on each click
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
// the button stays disabled until OpenCV.js has finished loading
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@@ -0,0 +1,70 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Rotate Transform Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Rotate Transform Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
// Rotate the input canvas by 45 degrees around its center.
let src = cv.imread('canvasInput');
let dst = new cv.Mat();
// cv.Size is (width, height): width = cols, height = rows.
let dsize = new cv.Size(src.cols, src.rows);
let center = new cv.Point(src.cols / 2, src.rows / 2);
// You can try more different parameters (angle in degrees, scale = 1)
let M = cv.getRotationMatrix2D(center, 45, 1);
cv.warpAffine(src, dst, M, dsize, cv.INTER_LINEAR, cv.BORDER_CONSTANT, new cv.Scalar());
cv.imshow('canvasOutput', dst);
// free the underlying WASM memory
src.delete(); dst.delete(); M.delete();
</script>
<script type="text/javascript">
// Page wiring: load the sample code, default image, and OpenCV.js,
// then enable the "Try it" button once the runtime is ready.
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('lena.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
// run whatever is currently in the editor on each click
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
// the button stays disabled until OpenCV.js has finished loading
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@@ -0,0 +1,69 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Affine Transform Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Affine Transform Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
// Translate the input canvas 50 px right and 100 px down via warpAffine.
let src = cv.imread('canvasInput');
let dst = new cv.Mat();
// 2x3 affine matrix [[1, 0, tx], [0, 1, ty]] with tx = 50, ty = 100
let M = cv.matFromArray(2, 3, cv.CV_64FC1, [1, 0, 50, 0, 1, 100]);
// cv.Size is (width, height): width = cols, height = rows.
let dsize = new cv.Size(src.cols, src.rows);
// You can try more different parameters
cv.warpAffine(src, dst, M, dsize, cv.INTER_LINEAR, cv.BORDER_CONSTANT, new cv.Scalar());
cv.imshow('canvasOutput', dst);
// free the underlying WASM memory
src.delete(); dst.delete(); M.delete();
</script>
<script type="text/javascript">
// Page wiring: load the sample code, default image, and OpenCV.js,
// then enable the "Try it" button once the runtime is ready.
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('lena.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
// run whatever is currently in the editor on each click
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
// the button stays disabled until OpenCV.js has finished loading
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@@ -0,0 +1,75 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Perspective Transform Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Perspective Transform Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
// Compute a perspective transform from four point correspondences and
// warp the input canvas with it.
let src = cv.imread('canvasInput');
let dst = new cv.Mat();
// cv.Size is (width, height): width = cols, height = rows.
let dsize = new cv.Size(src.cols, src.rows);
// (data32F[0], data32F[1]) is the first point
// (data32F[2], data32F[3]) is the second point
// (data32F[4], data32F[5]) is the third point
// (data32F[6], data32F[7]) is the fourth point
let srcTri = cv.matFromArray(4, 1, cv.CV_32FC2, [56, 65, 368, 52, 28, 387, 389, 390]);
let dstTri = cv.matFromArray(4, 1, cv.CV_32FC2, [0, 0, 300, 0, 0, 300, 300, 300]);
let M = cv.getPerspectiveTransform(srcTri, dstTri);
// You can try more different parameters
cv.warpPerspective(src, dst, M, dsize, cv.INTER_LINEAR, cv.BORDER_CONSTANT, new cv.Scalar());
cv.imshow('canvasOutput', dst);
// free the underlying WASM memory
src.delete(); dst.delete(); M.delete(); srcTri.delete(); dstTri.delete();
</script>
<script type="text/javascript">
// Page wiring: load the sample code, default image, and OpenCV.js,
// then enable the "Try it" button once the runtime is ready.
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('lena.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
// run whatever is currently in the editor on each click
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
// the button stays disabled until OpenCV.js has finished loading
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@@ -0,0 +1,85 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Image GrabCut Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Image GrabCut Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
// Segment the foreground inside a fixed rectangle with GrabCut, black out
// the background pixels, and draw the initialization rectangle in blue.
let src = cv.imread('canvasInput');
// grabCut requires a 3-channel image; drop the alpha channel
cv.cvtColor(src, src, cv.COLOR_RGBA2RGB, 0);
let mask = new cv.Mat();
let bgdModel = new cv.Mat();
let fgdModel = new cv.Mat();
let rect = new cv.Rect(50, 50, 260, 280);
// one iteration, initialized from the rectangle
cv.grabCut(src, mask, rect, bgdModel, fgdModel, 1, cv.GC_INIT_WITH_RECT);
// draw foreground: zero every pixel the mask labels background (0)
// or probable background (2)
for (let i = 0; i < src.rows; i++) {
for (let j = 0; j < src.cols; j++) {
if (mask.ucharPtr(i, j)[0] == 0 || mask.ucharPtr(i, j)[0] == 2) {
src.ucharPtr(i, j)[0] = 0;
src.ucharPtr(i, j)[1] = 0;
src.ucharPtr(i, j)[2] = 0;
}
}
}
// draw grab rect
let color = new cv.Scalar(0, 0, 255);
let point1 = new cv.Point(rect.x, rect.y);
let point2 = new cv.Point(rect.x + rect.width, rect.y + rect.height);
cv.rectangle(src, point1, point2, color);
cv.imshow('canvasOutput', src);
// free the underlying WASM memory
src.delete(); mask.delete(); bgdModel.delete(); fgdModel.delete();
</script>
<script type="text/javascript">
// Page wiring: load the sample code, default image, and OpenCV.js,
// then enable the "Try it" button once the runtime is ready.
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('lena.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
// run whatever is currently in the editor on each click
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
// the button stays disabled until OpenCV.js has finished loading
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@@ -0,0 +1,68 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Image Laplacian Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Image Laplacian Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
// Compute the Laplacian (second-derivative edge response) of the grayscale input.
let input = cv.imread('canvasInput');
let edges = new cv.Mat();
cv.cvtColor(input, input, cv.COLOR_RGB2GRAY, 0);
// ksize = 1, scale = 1, delta = 0; tweak these to experiment
cv.Laplacian(input, edges, cv.CV_8U, 1, 1, 0, cv.BORDER_DEFAULT);
cv.imshow('canvasOutput', edges);
// free the underlying WASM memory
input.delete();
edges.delete();
</script>
<script type="text/javascript">
// Page wiring: load the sample code, default image, and OpenCV.js,
// then enable the "Try it" button once the runtime is ready.
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('lena.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
// run whatever is currently in the editor on each click
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
// the button stays disabled until OpenCV.js has finished loading
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@@ -0,0 +1,79 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Image Sobel Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<!-- Interactive OpenCV.js tutorial: cv.Sobel in x and y directions, each shown on its own canvas.
     The commented-out cv.Scharr lines in the snippet are an intentional alternative for the user to try.
     The "code-snippet" script is displayed verbatim in the editor, so it must stay unchanged. -->
<h2>Image Sobel Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b>, <b>canvasOutputx</b> and <b>canvasOutputy</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput" class="small"></canvas>
</td>
<td>
<canvas id="canvasOutputx" class="small"></canvas>
</td>
<td>
<canvas id="canvasOutputy" class="small"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutputx</div>
</td>
<td>
<div class="caption">canvasOutputy</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let dstx = new cv.Mat();
let dsty = new cv.Mat();
cv.cvtColor(src, src, cv.COLOR_RGB2GRAY, 0);
// You can try more different parameters
cv.Sobel(src, dstx, cv.CV_8U, 1, 0, 3, 1, 0, cv.BORDER_DEFAULT);
cv.Sobel(src, dsty, cv.CV_8U, 0, 1, 3, 1, 0, cv.BORDER_DEFAULT);
// cv.Scharr(src, dstx, cv.CV_8U, 1, 0, 1, 0, cv.BORDER_DEFAULT);
// cv.Scharr(src, dsty, cv.CV_8U, 0, 1, 1, 0, cv.BORDER_DEFAULT);
cv.imshow('canvasOutputx', dstx);
cv.imshow('canvasOutputy', dsty);
src.delete(); dstx.delete(); dsty.delete();
</script>
<script type="text/javascript">
// Page wiring: copy the snippet into the editor, load the sample image,
// run the editor contents on click, and enable "Try it" once OpenCV.js is ready.
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('lena.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@@ -0,0 +1,78 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Image AbsSobel Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<!-- Interactive OpenCV.js tutorial: contrasts Sobel on CV_8U (negative slopes clipped) with Sobel on
     CV_64F followed by cv.convertScaleAbs, which preserves both edge polarities.
     The "code-snippet" script is displayed verbatim in the editor, so it must stay unchanged. -->
<h2>Image AbsSobel Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b>, <b>canvasOutput8U</b> and <b>canvasOutput64F</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput" class="small"></canvas>
</td>
<td>
<canvas id="canvasOutput8U" class="small"></canvas>
</td>
<td>
<canvas id="canvasOutput64F" class="small"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput8U</div>
</td>
<td>
<div class="caption">canvasOutput64F</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let dstx = new cv.Mat();
let absDstx = new cv.Mat();
cv.cvtColor(src, src, cv.COLOR_RGB2GRAY, 0);
// You can try more different parameters
cv.Sobel(src, dstx, cv.CV_8U, 1, 0, 3, 1, 0, cv.BORDER_DEFAULT);
cv.Sobel(src, absDstx, cv.CV_64F, 1, 0, 3, 1, 0, cv.BORDER_DEFAULT);
cv.convertScaleAbs(absDstx, absDstx, 1, 0);
cv.imshow('canvasOutput8U', dstx);
cv.imshow('canvasOutput64F', absDstx);
src.delete(); dstx.delete(); absDstx.delete();
</script>
<script type="text/javascript">
// Page wiring: copy the snippet into the editor, load the sample image,
// run the editor contents on click, and enable "Try it" once OpenCV.js is ready.
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('shape.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@@ -0,0 +1,90 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Back Project Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<!-- Interactive OpenCV.js tutorial: builds a hue histogram (HSV channel 0, range 0..180) of the source
     image and back-projects it onto the destination image to highlight regions of similar color.
     The "code-snippet" script is displayed verbatim in the editor, so it must stay unchanged. -->
<h2>Back Project Example</h2>
<p>
&lt;canvas&gt; elements named <b>srcCanvasInput</b>, <b>dstCanvasInput</b> and <b>canvasInput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="srcCanvasInput" class="small"></canvas>
</td>
<td>
<canvas id="dstCanvasInput" class="small"></canvas>
</td>
<td>
<canvas id="canvasOutput" class="small"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">srcCanvasInput <input type="file" id="srcFileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">dstCanvasInput <input type="file" id="dstFileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('srcCanvasInput');
let dst = cv.imread('dstCanvasInput');
cv.cvtColor(src, src, cv.COLOR_RGB2HSV, 0);
cv.cvtColor(dst, dst, cv.COLOR_RGB2HSV, 0);
let srcVec = new cv.MatVector();
let dstVec = new cv.MatVector();
srcVec.push_back(src); dstVec.push_back(dst);
let backproj = new cv.Mat();
let none = new cv.Mat();
let mask = new cv.Mat();
let hist = new cv.Mat();
let channels = [0];
let histSize = [50];
let ranges = [0, 180];
let accumulate = false;
cv.calcHist(srcVec, channels, mask, hist, histSize, ranges, accumulate);
cv.normalize(hist, hist, 0, 255, cv.NORM_MINMAX, -1, none);
cv.calcBackProject(dstVec, channels, hist, backproj, ranges, 1);
cv.imshow('canvasOutput', backproj);
src.delete(); dst.delete(); srcVec.delete(); dstVec.delete();
backproj.delete(); mask.delete(); hist.delete(); none.delete();
</script>
<script type="text/javascript">
// Page wiring: copy the snippet into the editor, load the two sample images,
// run the editor contents on click, and enable "Try it" once OpenCV.js is ready.
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('handSrc.jpg', 'srcCanvasInput');
utils.loadImageToCanvas('handDst.jpg', 'dstCanvasInput');
utils.addFileInputHandler('srcFileInput', 'srcCanvasInput');
utils.addFileInputHandler('dstFileInput', 'dstCanvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@@ -0,0 +1,88 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Image Histogram Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<!-- Interactive OpenCV.js tutorial: computes a 256-bin grayscale histogram with cv.calcHist and draws it
     as filled bars scaled to the tallest bin. The "code-snippet" script is displayed verbatim in the
     editor, so it must stay unchanged. -->
<h2>Image Histogram Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0);
let srcVec = new cv.MatVector();
srcVec.push_back(src);
let accumulate = false;
let channels = [0];
let histSize = [256];
let ranges = [0, 255];
let hist = new cv.Mat();
let mask = new cv.Mat();
let color = new cv.Scalar(255, 255, 255);
let scale = 2;
// You can try more different parameters
cv.calcHist(srcVec, channels, mask, hist, histSize, ranges, accumulate);
let result = cv.minMaxLoc(hist, mask);
let max = result.maxVal;
let dst = new cv.Mat.zeros(src.rows, histSize[0] * scale,
cv.CV_8UC3);
// draw histogram
for (let i = 0; i < histSize[0]; i++) {
let binVal = hist.data32F[i] * src.rows / max;
let point1 = new cv.Point(i * scale, src.rows - 1);
let point2 = new cv.Point((i + 1) * scale - 1, src.rows - binVal);
cv.rectangle(dst, point1, point2, color, cv.FILLED);
}
cv.imshow('canvasOutput', dst);
src.delete(); dst.delete(); srcVec.delete(); mask.delete(); hist.delete();
</script>
<script type="text/javascript">
// Page wiring: copy the snippet into the editor, load the sample image,
// run the editor contents on click, and enable "Try it" once OpenCV.js is ready.
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('lena.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@@ -0,0 +1,73 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Image CLAHE Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<!-- Interactive OpenCV.js tutorial: compares plain cv.equalizeHist with CLAHE (clip limit 40, 8x8 tiles).
     NOTE(review): the snippet calls cv.imshow twice on the same 'canvasOutput', so only the CLAHE result
     (the second call) remains visible; the user is expected to edit the snippet to see the other result.
     The "code-snippet" script is displayed verbatim in the editor, so it must stay unchanged. -->
<h2>Image CLAHE Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let equalDst = new cv.Mat();
let claheDst = new cv.Mat();
cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0);
cv.equalizeHist(src, equalDst);
let tileGridSize = new cv.Size(8, 8);
// You can try more different parameters
let clahe = new cv.CLAHE(40, tileGridSize);
clahe.apply(src, claheDst);
cv.imshow('canvasOutput', equalDst);
cv.imshow('canvasOutput', claheDst);
src.delete(); equalDst.delete(); claheDst.delete(); clahe.delete();
</script>
<script type="text/javascript">
// Page wiring: copy the snippet into the editor, load the sample image,
// run the editor contents on click, and enable "Try it" once OpenCV.js is ready.
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('lena.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@@ -0,0 +1,68 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Equalize Histogram Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<!-- Interactive OpenCV.js tutorial: grayscale histogram equalization via cv.equalizeHist.
     NOTE(review): the snippet draws src and then dst to the same 'canvasOutput', so only the equalized
     result (the second call) remains visible; edit the snippet to compare with the input.
     The "code-snippet" script is displayed verbatim in the editor, so it must stay unchanged. -->
<h2>Equalize Histogram Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let dst = new cv.Mat();
cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0);
cv.equalizeHist(src, dst);
cv.imshow('canvasOutput', src);
cv.imshow('canvasOutput', dst);
src.delete(); dst.delete();
</script>
<script type="text/javascript">
// Page wiring: copy the snippet into the editor, load the sample image,
// run the editor contents on click, and enable "Try it" once OpenCV.js is ready.
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('lena.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@@ -0,0 +1,79 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Hough Circles Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<!-- Interactive OpenCV.js tutorial: cv.HoughCircles on a grayscale image; detected circles are read
     back as (x, y, radius) triples from circles.data32F and drawn onto a blank output Mat.
     The "code-snippet" script is displayed verbatim in the editor, so it must stay unchanged. -->
<h2>Hough Circles Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let dst = cv.Mat.zeros(src.rows, src.cols, cv.CV_8U);
let circles = new cv.Mat();
let color = new cv.Scalar(255, 0, 0);
cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0);
// You can try more different parameters
cv.HoughCircles(src, circles, cv.HOUGH_GRADIENT,
1, 45, 75, 40, 0, 0);
// draw circles
for (let i = 0; i < circles.cols; ++i) {
let x = circles.data32F[i * 3];
let y = circles.data32F[i * 3 + 1];
let radius = circles.data32F[i * 3 + 2];
let center = new cv.Point(x, y);
cv.circle(dst, center, radius, color);
}
cv.imshow('canvasOutput', dst);
src.delete(); dst.delete(); circles.delete();
</script>
<script type="text/javascript">
// Page wiring: copy the snippet into the editor, load the sample image,
// run the editor contents on click, and enable "Try it" once OpenCV.js is ready.
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('coins.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@@ -0,0 +1,83 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Hough Lines Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<!-- Interactive OpenCV.js tutorial: Canny edge detection followed by the standard Hough transform
     (cv.HoughLines); each (rho, theta) pair is converted to two far-apart endpoints for drawing.
     The "code-snippet" script is displayed verbatim in the editor, so it must stay unchanged. -->
<h2>Hough Lines Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let dst = cv.Mat.zeros(src.rows, src.cols, cv.CV_8UC3);
let lines = new cv.Mat();
cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0);
cv.Canny(src, src, 50, 200, 3);
// You can try more different parameters
cv.HoughLines(src, lines, 1, Math.PI / 180,
30, 0, 0, 0, Math.PI);
// draw lines
for (let i = 0; i < lines.rows; ++i) {
let rho = lines.data32F[i * 2];
let theta = lines.data32F[i * 2 + 1];
let a = Math.cos(theta);
let b = Math.sin(theta);
let x0 = a * rho;
let y0 = b * rho;
let startPoint = {x: x0 - 1000 * b, y: y0 + 1000 * a};
let endPoint = {x: x0 + 1000 * b, y: y0 - 1000 * a};
cv.line(dst, startPoint, endPoint, [255, 0, 0, 255]);
}
cv.imshow('canvasOutput', dst);
src.delete(); dst.delete(); lines.delete();
</script>
<script type="text/javascript">
// Page wiring: copy the snippet into the editor, load the sample image,
// run the editor contents on click, and enable "Try it" once OpenCV.js is ready.
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('shape.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@@ -0,0 +1,77 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Image HoughLinesP Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<!-- Interactive OpenCV.js tutorial: probabilistic Hough transform (cv.HoughLinesP) on Canny edges;
     line segments come back as (x1, y1, x2, y2) integer quadruples in lines.data32S.
     The "code-snippet" script is displayed verbatim in the editor, so it must stay unchanged. -->
<h2>Image HoughLinesP Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let dst = cv.Mat.zeros(src.rows, src.cols, cv.CV_8UC3);
let lines = new cv.Mat();
let color = new cv.Scalar(255, 0, 0);
cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0);
cv.Canny(src, src, 50, 200, 3);
// You can try more different parameters
cv.HoughLinesP(src, lines, 1, Math.PI / 180, 2, 0, 0);
// draw lines
for (let i = 0; i < lines.rows; ++i) {
let startPoint = new cv.Point(lines.data32S[i * 4], lines.data32S[i * 4 + 1]);
let endPoint = new cv.Point(lines.data32S[i * 4 + 2], lines.data32S[i * 4 + 3]);
cv.line(dst, startPoint, endPoint, color);
}
cv.imshow('canvasOutput', dst);
src.delete(); dst.delete(); lines.delete();
</script>
<script type="text/javascript">
// Page wiring: copy the snippet into the editor, load the sample image,
// run the editor contents on click, and enable "Try it" once OpenCV.js is ready.
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('shape.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@@ -0,0 +1,114 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Image Bitwise Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<!-- Interactive OpenCV.js tutorial: overlays a logo onto an image using a threshold mask plus
     bitwise_and/bitwise_not, then copies the composite into the top-left ROI pixel by pixel.
     NOTE(review): the copy loop writes only ucharPtr(i, j)[0] — channel 0 of each pixel — which appears
     intentional for this demo; confirm against the upstream tutorial before changing.
     The "code-snippet" script is displayed verbatim in the editor, so it must stay unchanged. -->
<h2>Image Bitwise Example</h2>
<p>
&lt;canvas&gt; elements named <b>imageCanvasInput</b>, <b>logoCanvasInput</b> and <b>CanvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="imageCanvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">imageCanvasInput <input type="file" id="imageFileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
<tr>
<td>
<canvas id="logoCanvasInput"></canvas>
</td>
<td>
</td>
</tr>
<tr>
<td>
<div class="caption">logoCanvasInput <input type="file" id="logoFileInput" name="file" accept="image/*" /></div>
</td>
<td>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('imageCanvasInput');
let logo = cv.imread('logoCanvasInput');
let dst = new cv.Mat();
let roi = new cv.Mat();
let mask = new cv.Mat();
let maskInv = new cv.Mat();
let imgBg = new cv.Mat();
let imgFg = new cv.Mat();
let sum = new cv.Mat();
let rect = new cv.Rect(0, 0, logo.cols, logo.rows);
// I want to put logo on top-left corner, So I create a ROI
roi = src.roi(rect);
// Create a mask of logo and create its inverse mask also
cv.cvtColor(logo, mask, cv.COLOR_RGBA2GRAY, 0);
cv.threshold(mask, mask, 100, 255, cv.THRESH_BINARY);
cv.bitwise_not(mask, maskInv);
// Black-out the area of logo in ROI
cv.bitwise_and(roi, roi, imgBg, maskInv);
// Take only region of logo from logo image
cv.bitwise_and(logo, logo, imgFg, mask);
// Put logo in ROI and modify the main image
cv.add(imgBg, imgFg, sum);
dst = src.clone();
for (let i = 0; i < logo.rows; i++) {
for (let j = 0; j < logo.cols; j++) {
dst.ucharPtr(i, j)[0] = sum.ucharPtr(i, j)[0];
}
}
cv.imshow('canvasOutput', dst);
src.delete(); dst.delete(); logo.delete(); roi.delete(); mask.delete();
maskInv.delete(); imgBg.delete(); imgFg.delete(); sum.delete();
</script>
<script type="text/javascript">
// Page wiring: copy the snippet into the editor, load the base image and logo,
// run the editor contents on click, and enable "Try it" once OpenCV.js is ready.
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('lena.jpg', 'imageCanvasInput');
utils.loadImageToCanvas('lenaFace.png', 'logoCanvasInput');
utils.addFileInputHandler('imageFileInput', 'imageCanvasInput');
utils.addFileInputHandler('logoFileInput', 'logoCanvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@@ -0,0 +1,263 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Image Classification Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<!-- Interactive OpenCV.js DNN tutorial: the user uploads a model (and optional config), the page builds
     a blob from the input canvas, runs cv.readNet/forward, and shows the top-3 classes with timing.
     The "code-snippet" scripts are displayed verbatim in the editable textareas, so they must stay
     unchanged; page behavior lives in the final text/javascript block. -->
<h2>Image Classification Example</h2>
<p>
This tutorial shows you how to write an image classification example with OpenCV.js.<br>
To try the example you should click the <b>modelFile</b> button(and <b>configFile</b> button if needed) to upload inference model.
You can find the model URLs and parameters in the <a href="#appendix">model info</a> section.
Then You should change the parameters in the first code snippet according to the uploaded model.
Finally click <b>Try it</b> button to see the result. You can choose any other images.<br>
</p>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput" width="400" height="400"></canvas>
</td>
<td>
<table style="visibility: hidden;" id="result">
<thead>
<tr>
<th scope="col">#</th>
<th scope="col" width=300>Label</th>
<th scope="col">Probability</th>
</tr>
</thead>
<tbody>
<tr>
<th scope="row">1</th>
<td id="label0" align="center"></td>
<td id="prob0" align="center"></td>
</tr>
<tr>
<th scope="row">2</th>
<td id="label1" align="center"></td>
<td id="prob1" align="center"></td>
</tr>
<tr>
<th scope="row">3</th>
<td id="label2" align="center"></td>
<td id="prob2" align="center"></td>
</tr>
</tbody>
</table>
<p id='status' align="left"></p>
</td>
</tr>
<tr>
<td>
<div class="caption">
canvasInput <input type="file" id="fileInput" name="file" accept="image/*">
</div>
</td>
<td></td>
</tr>
<tr>
<td>
<div class="caption">
modelFile <input type="file" id="modelFile">
</div>
</td>
</tr>
<tr>
<td>
<div class="caption">
configFile <input type="file" id="configFile">
</div>
</td>
</tr>
</table>
</div>
<div>
<p class="err" id="errorMessage"></p>
</div>
<div>
<h3>Help function</h3>
<p>1.The parameters for model inference which you can modify to investigate more models.</p>
<textarea class="code" rows="13" cols="100" id="codeEditor" spellcheck="false"></textarea>
<p>2.Main loop in which will read the image from canvas and do inference once.</p>
<textarea class="code" rows="17" cols="100" id="codeEditor1" spellcheck="false"></textarea>
<p>3.Load labels from txt file and process it into an array.</p>
<textarea class="code" rows="7" cols="100" id="codeEditor2" spellcheck="false"></textarea>
<p>4.Get blob from image as input for net, and standardize it with <b>mean</b> and <b>std</b>.</p>
<textarea class="code" rows="17" cols="100" id="codeEditor3" spellcheck="false"></textarea>
<p>5.Fetch model file and save to emscripten file system once click the input button.</p>
<textarea class="code" rows="17" cols="100" id="codeEditor4" spellcheck="false"></textarea>
<p>6.The post-processing, including softmax if needed and get the top classes from the output vector.</p>
<textarea class="code" rows="35" cols="100" id="codeEditor5" spellcheck="false"></textarea>
</div>
<div id="appendix">
<h2>Model Info:</h2>
</div>
<script src="utils.js" type="text/javascript"></script>
<script src="js_dnn_example_helper.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
inputSize = [224,224];
mean = [104, 117, 123];
std = 1;
swapRB = false;
// record if need softmax function for post-processing
needSoftmax = false;
// url for label file, can from local or Internet
labelsUrl = "https://raw.githubusercontent.com/opencv/opencv/4.x/samples/data/dnn/classification_classes_ILSVRC2012.txt";
</script>
<script id="codeSnippet1" type="text/code-snippet">
main = async function() {
const labels = await loadLables(labelsUrl);
const input = getBlobFromImage(inputSize, mean, std, swapRB, 'canvasInput');
let net = cv.readNet(configPath, modelPath);
net.setInput(input);
const start = performance.now();
const result = net.forward();
const time = performance.now()-start;
const probs = softmax(result);
const classes = getTopClasses(probs, labels);
updateResult(classes, time);
input.delete();
net.delete();
result.delete();
}
</script>
<script id="codeSnippet5" type="text/code-snippet">
softmax = function(result) {
let arr = result.data32F;
if (needSoftmax) {
const maxNum = Math.max(...arr);
const expSum = arr.map((num) => Math.exp(num - maxNum)).reduce((a, b) => a + b);
return arr.map((value, index) => {
return Math.exp(value - maxNum) / expSum;
});
} else {
return arr;
}
}
</script>
<script type="text/javascript">
// Render the model-info table from the bundled JSON description.
let jsonUrl = "js_image_classification_model_info.json";
drawInfoTable(jsonUrl, 'appendix');
// Install the editable snippets: some come from the code-snippet scripts above,
// others are the stringified helper functions from js_dnn_example_helper.js.
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadCode('codeSnippet1', 'codeEditor1');
let loadLablesCode = 'loadLables = ' + loadLables.toString();
document.getElementById('codeEditor2').value = loadLablesCode;
let getBlobFromImageCode = 'getBlobFromImage = ' + getBlobFromImage.toString();
document.getElementById('codeEditor3').value = getBlobFromImageCode;
let loadModelCode = 'loadModel = ' + loadModel.toString();
document.getElementById('codeEditor4').value = loadModelCode;
utils.loadCode('codeSnippet5', 'codeEditor5');
let getTopClassesCode = 'getTopClasses = ' + getTopClasses.toString();
document.getElementById('codeEditor5').value += '\n' + '\n' + getTopClassesCode;
// Draw the default sample image onto the input canvas.
let canvas = document.getElementById('canvasInput');
let ctx = canvas.getContext('2d');
let img = new Image();
img.crossOrigin = 'anonymous';
img.src = 'space_shuttle.jpg';
img.onload = function() {
ctx.drawImage(img, 0, 0, canvas.width, canvas.height);
};
// "Try it": re-evaluate the (possibly user-edited) parameter and main snippets,
// then schedule main() unless no model file has been uploaded yet.
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
initStatus();
document.getElementById('status').innerHTML = 'Running function main()...';
utils.executeCode('codeEditor');
utils.executeCode('codeEditor1');
if (modelPath === "") {
document.getElementById('status').innerHTML = 'Runing failed.';
utils.printError('Please upload model file by clicking the button first.');
} else {
setTimeout(main, 1);
}
});
let fileInput = document.getElementById('fileInput');
fileInput.addEventListener('change', (e) => {
initStatus();
loadImageToCanvas(e, 'canvasInput');
});
// Upload handlers: loadModel saves the picked file into the emscripten FS and returns its path.
let configPath = "";
let configFile = document.getElementById('configFile');
configFile.addEventListener('change', async (e) => {
initStatus();
configPath = await loadModel(e);
document.getElementById('status').innerHTML = `The config file '${configPath}' is created successfully.`;
});
let modelPath = "";
let modelFile = document.getElementById('modelFile');
modelFile.addEventListener('change', async (e) => {
initStatus();
modelPath = await loadModel(e);
document.getElementById('status').innerHTML = `The model file '${modelPath}' is created successfully.`;
// A new model invalidates any previously chosen config file.
configPath = "";
configFile.value = "";
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
// Placeholders overwritten below by executing the editor contents.
var main = async function() {};
var softmax = function(result){};
var getTopClasses = function(mat, labels, topK = 3){};
utils.executeCode('codeEditor1');
utils.executeCode('codeEditor2');
utils.executeCode('codeEditor3');
utils.executeCode('codeEditor4');
utils.executeCode('codeEditor5');
// Fill the result table with the top classes and show model / inference-time status.
function updateResult(classes, time) {
try{
classes.forEach((c,i) => {
let labelElement = document.getElementById('label'+i);
let probElement = document.getElementById('prob'+i);
labelElement.innerHTML = c.label;
probElement.innerHTML = c.prob + '%';
});
let result = document.getElementById('result');
result.style.visibility = 'visible';
document.getElementById('status').innerHTML = `<b>Model:</b> ${modelPath}<br>
<b>Inference time:</b> ${time.toFixed(2)} ms`;
} catch(e) {
console.log(e);
}
}
// Clear the status line, hide the result table, and reset any previous error message.
function initStatus() {
document.getElementById('status').innerHTML = '';
document.getElementById('result').style.visibility = 'hidden';
utils.clearError();
}
</script>
</body>
</html>

View File

@@ -0,0 +1,65 @@
{
"caffe": [
{
"model": "alexnet",
"mean": "104, 117, 123",
"std": "1",
"swapRB": "false",
"needSoftmax": "false",
"labelsUrl": "https://raw.githubusercontent.com/opencv/opencv/4.x/samples/data/dnn/classification_classes_ILSVRC2012.txt",
"modelUrl": "http://dl.caffe.berkeleyvision.org/bvlc_alexnet.caffemodel",
"configUrl": "https://raw.githubusercontent.com/BVLC/caffe/master/models/bvlc_alexnet/deploy.prototxt"
},
{
"model": "densenet",
"mean": "127.5, 127.5, 127.5",
"std": "0.007843",
"swapRB": "false",
"needSoftmax": "true",
"labelsUrl": "https://raw.githubusercontent.com/opencv/opencv/4.x/samples/data/dnn/classification_classes_ILSVRC2012.txt",
"modelUrl": "https://drive.google.com/open?id=0B7ubpZO7HnlCcHlfNmJkU2VPelE",
"configUrl": "https://raw.githubusercontent.com/shicai/DenseNet-Caffe/master/DenseNet_121.prototxt"
},
{
"model": "googlenet",
"mean": "104, 117, 123",
"std": "1",
"swapRB": "false",
"needSoftmax": "false",
"labelsUrl": "https://raw.githubusercontent.com/opencv/opencv/4.x/samples/data/dnn/classification_classes_ILSVRC2012.txt",
"modelUrl": "http://dl.caffe.berkeleyvision.org/bvlc_googlenet.caffemodel",
"configUrl": "https://raw.githubusercontent.com/BVLC/caffe/master/models/bvlc_googlenet/deploy.prototxt"
},
{
"model": "squeezenet",
"mean": "104, 117, 123",
"std": "1",
"swapRB": "false",
"needSoftmax": "false",
"labelsUrl": "https://raw.githubusercontent.com/opencv/opencv/4.x/samples/data/dnn/classification_classes_ILSVRC2012.txt",
"modelUrl": "https://raw.githubusercontent.com/forresti/SqueezeNet/master/SqueezeNet_v1.0/squeezenet_v1.0.caffemodel",
"configUrl": "https://raw.githubusercontent.com/forresti/SqueezeNet/master/SqueezeNet_v1.0/deploy.prototxt"
},
{
"model": "VGG",
"mean": "104, 117, 123",
"std": "1",
"swapRB": "false",
"needSoftmax": "false",
"labelsUrl": "https://raw.githubusercontent.com/opencv/opencv/4.x/samples/data/dnn/classification_classes_ILSVRC2012.txt",
"modelUrl": "http://www.robots.ox.ac.uk/~vgg/software/very_deep/caffe/VGG_ILSVRC_19_layers.caffemodel",
"configUrl": "https://gist.githubusercontent.com/ksimonyan/3785162f95cd2d5fee77/raw/f02f8769e64494bcd3d7e97d5d747ac275825721/VGG_ILSVRC_19_layers_deploy.prototxt"
}
],
"tensorflow": [
{
"model": "inception",
"mean": "123, 117, 104",
"std": "1",
"swapRB": "true",
"needSoftmax": "false",
"labelsUrl": "https://raw.githubusercontent.com/petewarden/tf_ios_makefile_example/master/data/imagenet_comp_graph_label_strings.txt",
"modelUrl": "https://raw.githubusercontent.com/petewarden/tf_ios_makefile_example/master/data/tensorflow_inception_graph.pb"
}
]
}

View File

@@ -0,0 +1,269 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Image Classification Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
<script src="./webnn-polyfill.js"></script>
</head>
<body>
<h2>Image Classification Example</h2>
<p>
This tutorial shows you how to write an image classification example with OpenCV.js.<br>
To try the example you should click the <b>modelFile</b> button(and <b>configFile</b> button if needed) to upload inference model.
You can find the model URLs and parameters in the <a href="#appendix">model info</a> section.
Then You should change the parameters in the first code snippet according to the uploaded model.
Finally click <b>Try it</b> button to see the result. You can choose any other images.<br>
</p>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput" width="400" height="400"></canvas>
</td>
<td>
<table style="visibility: hidden;" id="result">
<thead>
<tr>
<th scope="col">#</th>
<th scope="col" width=300>Label</th>
<th scope="col">Probability</th>
</tr>
</thead>
<tbody>
<tr>
<th scope="row">1</th>
<td id="label0" align="center"></td>
<td id="prob0" align="center"></td>
</tr>
<tr>
<th scope="row">2</th>
<td id="label1" align="center"></td>
<td id="prob1" align="center"></td>
</tr>
<tr>
<th scope="row">3</th>
<td id="label2" align="center"></td>
<td id="prob2" align="center"></td>
</tr>
</tbody>
</table>
<p id='status' align="left"></p>
</td>
</tr>
<tr>
<td>
<div class="caption">
canvasInput <input type="file" id="fileInput" name="file" accept="image/*">
</div>
</td>
<td></td>
</tr>
<tr>
<td>
<div class="caption">
modelFile <input type="file" id="modelFile">
</div>
</td>
</tr>
<tr>
<td>
<div class="caption">
configFile <input type="file" id="configFile">
</div>
</td>
</tr>
</table>
</div>
<div>
<p class="err" id="errorMessage"></p>
</div>
<div>
<h3>Help function</h3>
<p>1.The parameters for model inference which you can modify to investigate more models.</p>
<textarea class="code" rows="13" cols="100" id="codeEditor" spellcheck="false"></textarea>
<p>2.Main loop in which will read the image from canvas and do inference once.</p>
<textarea class="code" rows="17" cols="100" id="codeEditor1" spellcheck="false"></textarea>
<p>3.Load labels from txt file and process it into an array.</p>
<textarea class="code" rows="7" cols="100" id="codeEditor2" spellcheck="false"></textarea>
<p>4.Get blob from image as input for net, and standardize it with <b>mean</b> and <b>std</b>.</p>
<textarea class="code" rows="17" cols="100" id="codeEditor3" spellcheck="false"></textarea>
<p>5.Fetch model file and save to emscripten file system once click the input button.</p>
<textarea class="code" rows="17" cols="100" id="codeEditor4" spellcheck="false"></textarea>
<p>6.The post-processing, including softmax if needed and get the top classes from the output vector.</p>
<textarea class="code" rows="35" cols="100" id="codeEditor5" spellcheck="false"></textarea>
</div>
<div id="appendix">
<h2>Model Info:</h2>
</div>
<script src="utils.js" type="text/javascript"></script>
<script src="js_dnn_example_helper.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
inputSize = [224,224];
mean = [104, 117, 123];
std = 1;
swapRB = false;
// record if need softmax function for post-processing
needSoftmax = false;
// url for label file, can from local or Internet
labelsUrl = "https://raw.githubusercontent.com/opencv/opencv/4.x/samples/data/dnn/classification_classes_ILSVRC2012.txt";
</script>
<script id="codeSnippet1" type="text/code-snippet">
main = async function() {
    // Load class labels and build the preprocessed input blob from the canvas.
    const labels = await loadLables(labelsUrl);
    const input = getBlobFromImage(inputSize, mean, std, swapRB, 'canvasInput');
    let net = cv.readNet(configPath, modelPath);
    // Backend id 6 — presumably the WebNN backend (this page loads
    // webnn-polyfill.js); TODO confirm against the OpenCV.js build.
    net.setPreferableBackend(6);
    net.setInput(input);
    // Warm-up pass before timing.
    let result = net.forward();
    const start = performance.now();
    for (let i = 0; i < 200; i++) {
        // Free the previous output Mat before overwriting it; the original
        // only deleted the final result, leaking 199 Mats in the
        // Emscripten heap during the benchmark loop.
        result.delete();
        result = net.forward();
    }
    const time = performance.now() - start;
    const probs = softmax(result);
    const classes = getTopClasses(probs, labels);
    // Report the average per-inference time over the 200 runs.
    updateResult(classes, time / 200);
    input.delete();
    net.delete();
    result.delete();
}
</script>
<script id="codeSnippet5" type="text/code-snippet">
softmax = function(result) {
    // Raw network scores: a Float32Array view over the output Mat.
    const scores = result.data32F;
    if (!needSoftmax) {
        // Model already emits probabilities; return the scores untouched.
        return scores;
    }
    // Numerically stable softmax: shift by the max before exponentiating.
    const maxVal = Math.max(...scores);
    const denom = scores.map((s) => Math.exp(s - maxVal)).reduce((a, b) => a + b);
    return scores.map((s) => Math.exp(s - maxVal) / denom);
}
</script>
<script type="text/javascript">
let jsonUrl = "js_image_classification_model_info.json";
drawInfoTable(jsonUrl, 'appendix');
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadCode('codeSnippet1', 'codeEditor1');
let loadLablesCode = 'loadLables = ' + loadLables.toString();
document.getElementById('codeEditor2').value = loadLablesCode;
let getBlobFromImageCode = 'getBlobFromImage = ' + getBlobFromImage.toString();
document.getElementById('codeEditor3').value = getBlobFromImageCode;
let loadModelCode = 'loadModel = ' + loadModel.toString();
document.getElementById('codeEditor4').value = loadModelCode;
utils.loadCode('codeSnippet5', 'codeEditor5');
let getTopClassesCode = 'getTopClasses = ' + getTopClasses.toString();
document.getElementById('codeEditor5').value += '\n' + '\n' + getTopClassesCode;
let canvas = document.getElementById('canvasInput');
let ctx = canvas.getContext('2d');
let img = new Image();
img.crossOrigin = 'anonymous';
img.src = 'space_shuttle.jpg';
img.onload = function() {
ctx.drawImage(img, 0, 0, canvas.width, canvas.height);
};
let tryIt = document.getElementById('tryIt');
// Run the demo: re-evaluate the editable snippets so user edits take
// effect, then kick off main() unless no model has been uploaded yet.
tryIt.addEventListener('click', () => {
    initStatus();
    document.getElementById('status').innerHTML = 'Running function main()...';
    utils.executeCode('codeEditor');
    utils.executeCode('codeEditor1');
    if (modelPath === "") {
        // Typo fixed: was 'Runing failed.'
        document.getElementById('status').innerHTML = 'Running failed.';
        utils.printError('Please upload model file by clicking the button first.');
    } else {
        // Defer main() so the status text above can repaint first.
        setTimeout(main, 1);
    }
});
let fileInput = document.getElementById('fileInput');
// Replace the input canvas with a user-chosen image.
fileInput.addEventListener('change', (e) => {
initStatus();
loadImageToCanvas(e, 'canvasInput');
});
let configPath = "";
let configFile = document.getElementById('configFile');
// Save the uploaded config file into the Emscripten FS and remember its path.
configFile.addEventListener('change', async (e) => {
initStatus();
configPath = await loadModel(e);
document.getElementById('status').innerHTML = `The config file '${configPath}' is created successfully.`;
});
let modelPath = "";
let modelFile = document.getElementById('modelFile');
// Save the uploaded model file; a new model invalidates any previous config.
modelFile.addEventListener('change', async (e) => {
initStatus();
modelPath = await loadModel(e);
document.getElementById('status').innerHTML = `The model file '${modelPath}' is created successfully.`;
configPath = "";
configFile.value = "";
});
// Enable the Try-it button only after OpenCV.js has finished loading.
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
// Placeholder definitions, overwritten when the snippets execute below.
var main = async function() {};
var softmax = function(result){};
var getTopClasses = function(mat, labels, topK = 3){};
utils.executeCode('codeEditor1');
utils.executeCode('codeEditor2');
utils.executeCode('codeEditor3');
utils.executeCode('codeEditor4');
utils.executeCode('codeEditor5');
function updateResult(classes, time) {
    try {
        // Fill one table row per predicted class.
        classes.forEach((cls, idx) => {
            document.getElementById('label' + idx).innerHTML = cls.label;
            document.getElementById('prob' + idx).innerHTML = cls.prob + '%';
        });
        // Reveal the result table and report the model plus timing.
        document.getElementById('result').style.visibility = 'visible';
        document.getElementById('status').innerHTML = `<b>Model:</b> ${modelPath}<br>
<b>Inference time:</b> ${time.toFixed(2)} ms`;
    } catch (e) {
        // Keep the demo alive if the DOM is not in the expected state.
        console.log(e);
    }
}
function initStatus() {
    // Reset the page: hide the result table, blank the status line,
    // and clear any previous error message.
    document.getElementById('result').style.visibility = 'hidden';
    document.getElementById('status').innerHTML = '';
    utils.clearError();
}
</script>
</body>
</html>

View File

@@ -0,0 +1,281 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Image Classification Example with Camera</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Image Classification Example with Camera</h2>
<p>
This tutorial shows you how to write an image classification example with camera.<br>
To try the example you should click the <b>modelFile</b> button(and <b>configFile</b> button if needed) to upload inference model.
You can find the model URLs and parameters in the <a href="#appendix">model info</a> section.
Then You should change the parameters in the first code snippet according to the uploaded model.
Finally click <b>Start/Stop</b> button to start or stop the camera capture.<br>
</p>
<div class="control"><button id="startAndStop" disabled>Start</button></div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<video id="videoInput" width="400" height="400"></video>
</td>
<td>
<table style="visibility: hidden;" id="result">
<thead>
<tr>
<th scope="col">#</th>
<th scope="col" width=300>Label</th>
<th scope="col">Probability</th>
</tr>
</thead>
<tbody>
<tr>
<th scope="row">1</th>
<td id="label0" align="center"></td>
<td id="prob0" align="center"></td>
</tr>
<tr>
<th scope="row">2</th>
<td id="label1" align="center"></td>
<td id="prob1" align="center"></td>
</tr>
<tr>
<th scope="row">3</th>
<td id="label2" align="center"></td>
<td id="prob2" align="center"></td>
</tr>
</tbody>
</table>
<p id='status' align="left"></p>
</td>
</tr>
<tr>
<td>
<div class="caption">
videoInput
</div>
</td>
<td></td>
</tr>
<tr>
<td>
<div class="caption">
modelFile <input type="file" id="modelFile">
</div>
</td>
</tr>
<tr>
<td>
<div class="caption">
configFile <input type="file" id="configFile">
</div>
</td>
</tr>
</table>
</div>
<div>
<p class="err" id="errorMessage"></p>
</div>
<div>
<h3>Help function</h3>
<p>1.The parameters for model inference which you can modify to investigate more models.</p>
<textarea class="code" rows="13" cols="100" id="codeEditor" spellcheck="false"></textarea>
<p>2.The function to capture video from camera, and the main loop in which will do inference once.</p>
<textarea class="code" rows="35" cols="100" id="codeEditor1" spellcheck="false"></textarea>
<p>3.Load labels from txt file and process it into an array.</p>
<textarea class="code" rows="7" cols="100" id="codeEditor2" spellcheck="false"></textarea>
<p>4.Get blob from image as input for net, and standardize it with <b>mean</b> and <b>std</b>.</p>
<textarea class="code" rows="17" cols="100" id="codeEditor3" spellcheck="false"></textarea>
<p>5.Fetch model file and save to emscripten file system once click the input button.</p>
<textarea class="code" rows="17" cols="100" id="codeEditor4" spellcheck="false"></textarea>
<p>6.The post-processing, including softmax if needed and get the top classes from the output vector.</p>
<textarea class="code" rows="35" cols="100" id="codeEditor5" spellcheck="false"></textarea>
</div>
<div id="appendix">
<h2>Model Info:</h2>
</div>
<script src="utils.js" type="text/javascript"></script>
<script src="js_dnn_example_helper.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
inputSize = [224,224];
mean = [104, 117, 123];
std = 1;
swapRB = false;
// record if need softmax function for post-processing
needSoftmax = false;
// url for label file, can from local or Internet
labelsUrl = "https://raw.githubusercontent.com/opencv/opencv/4.x/samples/data/dnn/classification_classes_ILSVRC2012.txt";
</script>
<script id="codeSnippet1" type="text/code-snippet">
// Reusable RGBA frame buffer and capture object bound to the <video> element.
let frame = new cv.Mat(video.height, video.width, cv.CV_8UC4);
let cap = new cv.VideoCapture(video);
// Run one inference on the given frame, update the UI, then schedule the
// next capture. NOTE(review): the parameter shadows the outer 'frame'
// buffer, and labels + network are re-loaded on every frame — presumably
// kept simple for the tutorial; confirm before reusing this pattern.
main = async function(frame) {
const labels = await loadLables(labelsUrl);
const input = getBlobFromImage(inputSize, mean, std, swapRB, frame);
let net = cv.readNet(configPath, modelPath);
net.setInput(input);
// Time a single forward pass.
const start = performance.now();
const result = net.forward();
const time = performance.now()-start;
const probs = softmax(result);
const classes = getTopClasses(probs, labels);
updateResult(classes, time);
// Queue the next frame, then release this frame's resources.
setTimeout(processVideo, 0);
input.delete();
net.delete();
result.delete();
}
// Grab a frame from the camera and hand it to main(); exits once the
// user toggles the camera off (streaming === false).
function processVideo() {
try {
if (!streaming) {
return;
}
cap.read(frame);
main(frame);
} catch (err) {
utils.printError(err);
}
}
// Kick off the capture loop.
setTimeout(processVideo, 0);
</script>
<script id="codeSnippet5" type="text/code-snippet">
softmax = function(result) {
    // Raw network scores: a Float32Array view over the output Mat.
    const scores = result.data32F;
    if (!needSoftmax) {
        // Model already emits probabilities; return the scores untouched.
        return scores;
    }
    // Numerically stable softmax: shift by the max before exponentiating.
    const maxVal = Math.max(...scores);
    const denom = scores.map((s) => Math.exp(s - maxVal)).reduce((a, b) => a + b);
    return scores.map((s) => Math.exp(s - maxVal) / denom);
}
</script>
<script type="text/javascript">
let jsonUrl = "js_image_classification_model_info.json";
// Render the model-parameter table into the appendix section.
drawInfoTable(jsonUrl, 'appendix');
let utils = new Utils('errorMessage');
// Fill the editors: static snippets plus the shared helper functions
// (loadLables / getBlobFromImage / loadModel come from js_dnn_example_helper.js).
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadCode('codeSnippet1', 'codeEditor1');
let loadLablesCode = 'loadLables = ' + loadLables.toString();
document.getElementById('codeEditor2').value = loadLablesCode;
let getBlobFromImageCode = 'getBlobFromImage = ' + getBlobFromImage.toString();
document.getElementById('codeEditor3').value = getBlobFromImageCode;
let loadModelCode = 'loadModel = ' + loadModel.toString();
document.getElementById('codeEditor4').value = loadModelCode;
utils.loadCode('codeSnippet5', 'codeEditor5');
let getTopClassesCode = 'getTopClasses = ' + getTopClasses.toString();
document.getElementById('codeEditor5').value += '\n' + '\n' + getTopClassesCode;
let video = document.getElementById('videoInput');
let streaming = false;
let startAndStop = document.getElementById('startAndStop');
// Toggle the camera: start capture at QVGA, or stop and reset the UI.
startAndStop.addEventListener('click', () => {
if (!streaming) {
utils.clearError();
utils.startCamera('qvga', onVideoStarted, 'videoInput');
} else {
utils.stopCamera();
onVideoStopped();
}
});
let configPath = "";
let configFile = document.getElementById('configFile');
// Save the uploaded config file into the Emscripten FS and remember its path.
configFile.addEventListener('change', async (e) => {
initStatus();
configPath = await loadModel(e);
document.getElementById('status').innerHTML = `The config file '${configPath}' is created successfully.`;
});
let modelPath = "";
let modelFile = document.getElementById('modelFile');
// Save the uploaded model file; a new model invalidates any previous config.
modelFile.addEventListener('change', async (e) => {
initStatus();
modelPath = await loadModel(e);
document.getElementById('status').innerHTML = `The model file '${modelPath}' is created successfully.`;
configPath = "";
configFile.value = "";
});
// Enable the Start/Stop button only once OpenCV.js is ready.
utils.loadOpenCv(() => {
startAndStop.removeAttribute('disabled');
});
// Placeholder definitions, overwritten when the snippets execute below.
var main = async function(frame) {};
var softmax = function(result){};
var getTopClasses = function(mat, labels, topK = 3){};
utils.executeCode('codeEditor1');
utils.executeCode('codeEditor2');
utils.executeCode('codeEditor3');
utils.executeCode('codeEditor4');
utils.executeCode('codeEditor5');
// Camera stream is up: switch the button to 'Stop', size the video
// element to the actual stream resolution, and (re)run the editable
// snippets so inference starts with the current parameters.
function onVideoStarted() {
streaming = true;
startAndStop.innerText = 'Stop';
// NOTE(review): 'videoInput' relies on the implicit global created from
// the element id — works in browsers, but the 'video' variable defined
// above is the explicit reference; confirm before tightening.
videoInput.width = videoInput.videoWidth;
videoInput.height = videoInput.videoHeight;
utils.executeCode('codeEditor');
utils.executeCode('codeEditor1');
}
// Camera stream stopped: flag the processing loop to exit, restore the
// button label, and reset the status/result UI.
function onVideoStopped() {
streaming = false;
startAndStop.innerText = 'Start';
initStatus();
}
function updateResult(classes, time) {
    try {
        // Fill one table row per predicted class.
        classes.forEach((cls, idx) => {
            document.getElementById('label' + idx).innerHTML = cls.label;
            document.getElementById('prob' + idx).innerHTML = cls.prob + '%';
        });
        // Reveal the result table and report the model plus timing.
        document.getElementById('result').style.visibility = 'visible';
        document.getElementById('status').innerHTML = `<b>Model:</b> ${modelPath}<br>
<b>Inference time:</b> ${time.toFixed(2)} ms`;
    } catch (e) {
        // Keep the demo alive if the DOM is not in the expected state.
        console.log(e);
    }
}
function initStatus() {
    // Reset the page: hide the result table, blank the status line,
    // and clear any previous error message.
    document.getElementById('result').style.visibility = 'hidden';
    document.getElementById('status').innerHTML = '';
    utils.clearError();
}
</script>
</body>
</html>

View File

@@ -0,0 +1,69 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Image Read and Show Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Image Read and Show Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="8" cols="80" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let dst = new cv.Mat();
// To distinguish the input and output, we graying the image.
// You can try different conversions.
cv.cvtColor(src, dst, cv.COLOR_RGBA2GRAY);
cv.imshow('canvasOutput', dst);
src.delete();
dst.delete();
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
// Populate the editor with the default snippet and load the demo image.
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('lena.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
// Evaluate whatever is currently in the editor on each click.
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
// Enable the button only after OpenCV.js has finished loading.
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@@ -0,0 +1,700 @@
<!DOCTYPE html>
<html >
<head>
<meta charset="utf-8">
<title>Image Processing Video Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
<style type="text/css">
.dg {
text-align: left;
}
.dg .property-name {
font: 11px Lucida Grande,sans-serif;
line-height: 27px;
}
.dg.main .close-button {
font: 11px Lucida Grande,sans-serif;
line-height: 27px;
}
.cell-top {
vertical-align: top;
}
</style>
</head>
<body>
<h2>Image Processing Video Example</h2>
<p>
Open the controls and try different image processing filters.
</p>
<p class="err" id="errorMessage"></p>
<div id="container">
<table>
<tr>
<td></td>
<td>
<div>
<span>Current Filter: </span><span id="filterName">Pass Through</span>
</div>
</td>
<td>
<div>Select Filter:</div>
</td>
<td></td>
</tr>
<tr>
<td></td>
<td class="cell-top">
<canvas id="canvasOutput" width="640" height="480"></canvas>
</td>
<td class="cell-top">
<div id="guiContainer"></div>
</td>
<td></td>
</tr>
</table>
<div>
<video id="videoInput" class="hidden">Your browser does not support the video tag.</video>
</div>
</div>
<script src="https://webrtc.github.io/adapter/adapter-5.0.4.js" type="text/javascript"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/stats.js/r16/Stats.min.js" type="text/javascript"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/dat-gui/0.6.4/dat.gui.min.js" type="text/javascript"></script>
<script src="utils.js" type="text/javascript"></script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
// Stream geometry; filled in once the camera reports its size.
let width = 0;
let height = 0;
// Use a smaller capture resolution on narrow screens.
let resolution = window.innerWidth < 960 ? 'qvga' : 'vga';
// whether streaming video from the camera.
let streaming = false;
let video = document.getElementById('videoInput');
let vc = null;
let container = document.getElementById('container');
// Key of the previously applied filter (used by backprojection).
let lastFilter = '';
// Shared scratch Mats, allocated in startVideoProcessing().
let src = null;
let dstC1 = null;
let dstC3 = null;
let dstC4 = null;
// Allocate the reusable source and destination Mats (1-, 3- and
// 4-channel) now that the stream size is known, then start the frame loop.
function startVideoProcessing() {
src = new cv.Mat(height, width, cv.CV_8UC4);
dstC1 = new cv.Mat(height, width, cv.CV_8UC1);
dstC3 = new cv.Mat(height, width, cv.CV_8UC3);
dstC4 = new cv.Mat(height, width, cv.CV_8UC4);
requestAnimationFrame(processVideo);
}
// Identity filter: show the camera frame unchanged.
function passThrough(src) {
return src;
}
// Grayscale conversion into the shared 1-channel buffer.
function gray(src) {
cv.cvtColor(src, dstC1, cv.COLOR_RGBA2GRAY);
return dstC1;
}
// RGBA -> RGB -> HSV in place in the shared 3-channel buffer.
function hsv(src) {
cv.cvtColor(src, dstC3, cv.COLOR_RGBA2RGB);
cv.cvtColor(dstC3, dstC3, cv.COLOR_RGB2HSV);
return dstC3;
}
function canny(src) {
    // Canny edge detection on a grayscale copy; all parameters come
    // straight from the dat.GUI controls.
    cv.cvtColor(src, dstC1, cv.COLOR_RGBA2GRAY);
    const lowThresh = controls.cannyThreshold1;
    const highThresh = controls.cannyThreshold2;
    cv.Canny(dstC1, dstC1, lowThresh, highThresh,
        controls.cannyApertureSize, controls.cannyL2Gradient);
    return dstC1;
}
function inRange(src) {
    // Build per-channel bounds from the UI sliders (alpha pinned at 255).
    const lo = controls.inRangeLow;
    const hi = controls.inRangeHigh;
    const lowerBound = new cv.Mat(height, width, src.type(),
        new cv.Scalar(lo, lo, lo, 255));
    const upperBound = new cv.Mat(height, width, src.type(),
        new cv.Scalar(hi, hi, hi, 255));
    cv.inRange(src, lowerBound, upperBound, dstC1);
    lowerBound.delete();
    upperBound.delete();
    return dstC1;
}
// Fixed binary threshold on the RGBA frame (maxval 200, slightly dimmed).
function threshold(src) {
cv.threshold(src, dstC4, controls.thresholdValue, 200, cv.THRESH_BINARY);
return dstC4;
}
// Gaussian adaptive threshold on a grayscale copy; the block size comes
// from the UI and is kept odd by the slider's onChange handler.
function adaptiveThreshold(src) {
let mat = new cv.Mat(height, width, cv.CV_8U);
cv.cvtColor(src, mat, cv.COLOR_RGBA2GRAY);
cv.adaptiveThreshold(mat, dstC1, 200, cv.ADAPTIVE_THRESH_GAUSSIAN_C,
cv.THRESH_BINARY, Number(controls.adaptiveBlockSize), 2);
mat.delete();
return dstC1;
}
// Gaussian blur with a square kernel; sigmas (0, 0) are derived from it.
function gaussianBlur(src) {
cv.GaussianBlur(src, dstC4,
{width: controls.gaussianBlurSize, height: controls.gaussianBlurSize},
0, 0, cv.BORDER_DEFAULT);
return dstC4;
}
// Edge-preserving bilateral filter; runs on an RGB copy of the frame.
function bilateralFilter(src) {
let mat = new cv.Mat(height, width, cv.CV_8UC3);
cv.cvtColor(src, mat, cv.COLOR_RGBA2RGB);
cv.bilateralFilter(mat, dstC3, controls.bilateralFilterDiameter, controls.bilateralFilterSigma,
controls.bilateralFilterSigma, cv.BORDER_DEFAULT);
mat.delete();
return dstC3;
}
// Median blur applied directly to the RGBA frame.
function medianBlur(src) {
cv.medianBlur(src, dstC4, controls.medianBlurSize);
return dstC4;
}
function sobel(src) {
    // First-order x derivative (Sobel) on a grayscale copy of the frame.
    const grayMat = new cv.Mat(height, width, cv.CV_8UC1);
    cv.cvtColor(src, grayMat, cv.COLOR_RGB2GRAY, 0);
    cv.Sobel(grayMat, dstC1, cv.CV_8U, 1, 0, controls.sobelSize, 1, 0, cv.BORDER_DEFAULT);
    grayMat.delete();
    return dstC1;
}
// Scharr x derivative on a grayscale copy of the frame.
function scharr(src) {
let mat = new cv.Mat(height, width, cv.CV_8UC1);
cv.cvtColor(src, mat, cv.COLOR_RGB2GRAY, 0);
cv.Scharr(mat, dstC1, cv.CV_8U, 1, 0, 1, 0, cv.BORDER_DEFAULT);
mat.delete();
return dstC1;
}
// Laplacian on a grayscale copy; aperture size comes from the UI.
function laplacian(src) {
let mat = new cv.Mat(height, width, cv.CV_8UC1);
cv.cvtColor(src, mat, cv.COLOR_RGB2GRAY);
cv.Laplacian(mat, dstC1, cv.CV_8U, controls.laplacianSize, 1, 0, cv.BORDER_DEFAULT);
mat.delete();
return dstC1;
}
// Pre-generate a random RGBA color palette so contour colors stay stable
// across frames instead of being re-rolled per contour.
let contoursColor = [];
for (let i = 0; i < 10000; i++) {
contoursColor.push([Math.round(Math.random() * 255),
Math.round(Math.random() * 255),
Math.round(Math.random() * 255), 0]);
}
// Find contours on a thresholded grayscale copy and draw each one in a
// color taken from the pre-generated palette.
function contours(src) {
    cv.cvtColor(src, dstC1, cv.COLOR_RGBA2GRAY);
    cv.threshold(dstC1, dstC4, 120, 200, cv.THRESH_BINARY);
    let contours = new cv.MatVector();
    let hierarchy = new cv.Mat();
    cv.findContours(dstC4, contours, hierarchy,
        Number(controls.contoursMode),
        Number(controls.contoursMethod), {x: 0, y: 0});
    // Replace the shared 3-channel buffer with a fresh dark canvas.
    dstC3.delete();
    dstC3 = cv.Mat.ones(height, width, cv.CV_8UC3);
    for (let i = 0; i < contours.size(); ++i) {
        // Wrap around so a frame with more than 10000 contours does not
        // index past the palette and pass an undefined color.
        let color = contoursColor[i % contoursColor.length];
        cv.drawContours(dstC3, contours, i, color, 1, cv.LINE_8, hierarchy);
    }
    contours.delete(); hierarchy.delete();
    return dstC3;
}
// Compute a grayscale histogram of the frame and draw it as filled bars
// over the (grayscale) image itself.
function calcHist(src) {
cv.cvtColor(src, dstC1, cv.COLOR_RGBA2GRAY);
let srcVec = new cv.MatVector();
srcVec.push_back(dstC1);
// One histogram bin per 'scale' pixels of image width.
let scale = 2;
let channels = [0];
let histSize = [src.cols/scale];
const ranges = [0, 255];
let hist = new cv.Mat();
let mask = new cv.Mat();
// Bar color: opaque yellow.
let color = new cv.Scalar(0xfb, 0xca, 0x04, 0xff);
cv.calcHist(srcVec, channels, mask, hist, histSize, ranges);
// Tallest bin, used to normalize bar heights to the image height.
let result = cv.minMaxLoc(hist, mask);
let max = result.maxVal;
cv.cvtColor(dstC1, dstC4, cv.COLOR_GRAY2RGBA);
// draw histogram on src
for (let i = 0; i < histSize[0]; i++) {
let binVal = hist.data32F[i] * src.rows / max;
cv.rectangle(dstC4, {x: i * scale, y: src.rows - 1},
{x: (i + 1) * scale - 1, y: src.rows - binVal/3}, color, cv.FILLED);
}
srcVec.delete();
mask.delete();
hist.delete();
return dstC4;
}
// Histogram equalization on a grayscale copy of the frame.
function equalizeHist(src) {
cv.cvtColor(src, dstC1, cv.COLOR_RGBA2GRAY, 0);
cv.equalizeHist(dstC1, dstC1);
return dstC1;
}
// HSV reference frame, captured when this filter is first selected.
let base;
// Back-project the histogram of the reference frame onto the current
// frame, highlighting regions with a similar distribution.
function backprojection(src) {
    // (Re)capture the reference frame when switching to this filter.
    if (lastFilter !== 'backprojection') {
        if (base instanceof cv.Mat) {
            base.delete();
        }
        base = src.clone();
        cv.cvtColor(base, base, cv.COLOR_RGB2HSV, 0);
    }
    cv.cvtColor(src, dstC3, cv.COLOR_RGB2HSV, 0);
    // Validate the slider range BEFORE allocating: the original created
    // baseVec/targetVec/mask/hist first and leaked all four on this
    // early return, once per frame while the sliders were inverted.
    if (controls.backprojectionRangeLow >= controls.backprojectionRangeHigh) {
        return src;
    }
    let ranges = [controls.backprojectionRangeLow, controls.backprojectionRangeHigh];
    let baseVec = new cv.MatVector();
    let targetVec = new cv.MatVector();
    baseVec.push_back(base); targetVec.push_back(dstC3);
    let mask = new cv.Mat();
    let hist = new cv.Mat();
    let channels = [0];
    let histSize = [50];
    cv.calcHist(baseVec, channels, mask, hist, histSize, ranges);
    cv.normalize(hist, hist, 0, 255, cv.NORM_MINMAX);
    cv.calcBackProject(targetVec, channels, hist, dstC1, ranges, 1);
    baseVec.delete();
    targetVec.delete();
    mask.delete();
    hist.delete();
    return dstC1;
}
function erosion(src) {
    // Square all-ones structuring element sized from the UI control.
    const size = controls.erosionSize;
    const element = cv.Mat.ones(size, size, cv.CV_8U);
    const borderColor = new cv.Scalar();
    cv.erode(src, dstC4, element, {x: -1, y: -1}, 1,
        Number(controls.erosionBorderType), borderColor);
    element.delete();
    return dstC4;
}
function dilation(src) {
    // Square all-ones structuring element sized from the UI control.
    const size = controls.dilationSize;
    const element = cv.Mat.ones(size, size, cv.CV_8U);
    const borderColor = new cv.Scalar();
    cv.dilate(src, dstC4, element, {x: -1, y: -1}, 1,
        Number(controls.dilationBorderType), borderColor);
    element.delete();
    return dstC4;
}
// Apply the selected morphological operation with a structuring element
// whose shape and size come from the UI controls.
function morphology(src) {
let kernelSize = controls.morphologySize;
let kernel = cv.getStructuringElement(Number(controls.morphologyShape),
{width: kernelSize, height: kernelSize});
let color = new cv.Scalar();
let op = Number(controls.morphologyOp);
let image = src;
// NOTE(review): gradient/top-hat/black-hat are run on an RGB copy —
// presumably the 4-channel path is unsupported here; confirm.
if (op === cv.MORPH_GRADIENT || op === cv.MORPH_TOPHAT || op === cv.MORPH_BLACKHAT) {
cv.cvtColor(src, dstC3, cv.COLOR_RGBA2RGB);
image = dstC3;
}
cv.morphologyEx(image, dstC4, op, kernel, {x: -1, y: -1}, 1,
Number(controls.morphologyBorderType), color);
kernel.delete();
return dstC4;
}
function processVideo() {
    // Stop scheduling frames once the camera has been turned off.
    if (!streaming) return;
    stats.begin();
    vc.read(src);
    // Map each filter key to its implementation; unknown keys fall back
    // to the identity filter, matching the original switch default.
    const dispatch = {
        passThrough, gray, hsv, canny, inRange, threshold,
        adaptiveThreshold, gaussianBlur, bilateralFilter, medianBlur,
        sobel, scharr, laplacian, contours, calcHist, equalizeHist,
        backprojection, erosion, dilation, morphology,
    };
    const filterFn = dispatch[controls.filter] || passThrough;
    const result = filterFn(src);
    cv.imshow('canvasOutput', result);
    stats.end();
    // Remember the applied filter (backprojection uses this to detect a switch).
    lastFilter = controls.filter;
    requestAnimationFrame(processVideo);
}
// FPS meter (Stats.js), created in initUI().
let stats = null;
// Display names for the filter keys used by the dat.GUI controls.
let filters = {
'passThrough': 'Pass Through',
'gray': 'Gray',
'hsv': 'HSV',
'canny': 'Canny Edge Detection',
'inRange': 'In Range',
'threshold': 'Threshold',
'adaptiveThreshold': 'Adaptive Threshold',
'gaussianBlur': 'Gaussian Blurring',
'medianBlur': 'Median Blurring',
'bilateralFilter': 'Bilateral Filtering',
'sobel': 'Sobel Derivatives',
'scharr': 'Scharr Derivatives',
'laplacian': 'Laplacian Derivatives',
'contours': 'Contours',
'calcHist': 'Calculation',
'equalizeHist': 'Equalization',
'backprojection': 'Backprojection',
'erosion': 'Erosion',
'dilation': 'Dilation',
'morphology': 'Morphology',
};
// Label showing the currently selected filter.
let filterName = document.getElementById('filterName');
// Filter state + dat.GUI callbacks, populated in initUI().
let controls;
function initUI() {
    // FPS meter (stats.js), pinned to the top-right corner of the container.
    stats = new Stats();
    stats.showPanel(0);
    container.appendChild(stats.domElement);
    stats.domElement.style.position = 'absolute';
    stats.domElement.style.right = '0px';
    stats.domElement.style.top = '0px';
    // State object bound to the dat.GUI widgets below. `filter` holds the key of
    // the currently active filter; the function-valued members are button
    // callbacks that activate the corresponding filter; the remaining members
    // are the tunable parameters of each filter.
    controls = {
        filter: 'passThrough',
        setFilter: function(filter) {
            this.filter = filter;
            filterName.innerHTML = filters[filter];
        },
        passThrough: function() {
            this.setFilter('passThrough');
        },
        gray: function() {
            this.setFilter('gray');
        },
        hsv: function() {
            this.setFilter('hsv');
        },
        inRange: function() {
            this.setFilter('inRange');
        },
        inRangeLow: 75,
        inRangeHigh: 150,
        threshold: function() {
            this.setFilter('threshold');
        },
        thresholdValue: 100,
        adaptiveThreshold: function() {
            this.setFilter('adaptiveThreshold');
        },
        adaptiveBlockSize: 3,
        gaussianBlur: function() {
            this.setFilter('gaussianBlur');
        },
        gaussianBlurSize: 7,
        medianBlur: function() {
            this.setFilter('medianBlur');
        },
        medianBlurSize: 5,
        bilateralFilter: function() {
            this.setFilter('bilateralFilter');
        },
        bilateralFilterDiameter: 5,
        bilateralFilterSigma: 75,
        sobel: function() {
            this.setFilter('sobel');
        },
        sobelSize: 3,
        scharr: function() {
            this.setFilter('scharr');
        },
        laplacian: function() {
            this.setFilter('laplacian');
        },
        laplacianSize: 3,
        canny: function() {
            this.setFilter('canny');
        },
        cannyThreshold1: 150,
        cannyThreshold2: 300,
        cannyApertureSize: 3,
        cannyL2Gradient: false,
        contours: function() {
            this.setFilter('contours');
        },
        contoursMode: cv.RETR_CCOMP,
        contoursMethod: cv.CHAIN_APPROX_SIMPLE,
        calcHist: function() {
            this.setFilter('calcHist');
        },
        equalizeHist: function() {
            this.setFilter('equalizeHist');
        },
        backprojection: function() {
            this.setFilter('backprojection');
        },
        backprojectionRangeLow: 0,
        backprojectionRangeHigh: 150,
        morphology: function() {
            this.setFilter('morphology');
        },
        morphologyShape: cv.MORPH_RECT,
        morphologyOp: cv.MORPH_ERODE,
        morphologySize: 5,
        morphologyBorderType: cv.BORDER_CONSTANT,
    };
    let gui = new dat.GUI({autoPlace: false});
    let guiContainer = document.getElementById('guiContainer');
    guiContainer.appendChild(gui.domElement);
    // Keep at most one parameter folder open at a time: opening a new folder
    // closes the previously opened one.
    let lastFolder = null;
    function closeLastFolder(folder) {
        if (lastFolder != null && lastFolder != folder) {
            lastFolder.close();
        }
        lastFolder = folder;
    }
    gui.add(controls, 'passThrough').name(filters['passThrough']).onChange(function() {
        closeLastFolder(null);
    });
    let colorConversion = gui.addFolder('Color Conversion');
    colorConversion.add(controls, 'gray').name(filters['gray']).onChange(function() {
        closeLastFolder(null);
    });
    colorConversion.add(controls, 'hsv').name(filters['hsv']).onChange(function() {
        closeLastFolder(null);
    });
    let inRange = colorConversion.addFolder(filters['inRange']);
    inRange.domElement.onclick = function() {
        closeLastFolder(inRange);
        controls.inRange();
    };
    inRange.add(controls, 'inRangeLow', 0, 255, 1).name('lower boundary');
    inRange.add(controls, 'inRangeHigh', 0, 255, 1).name('higher boundary');
    // let geometricTransformations = gui.addFolder('Geometric Transformations');
    // TODO
    let thresholding = gui.addFolder('Thresholding');
    let threshold = thresholding.addFolder(filters['threshold']);
    threshold.domElement.onclick = function() {
        closeLastFolder(threshold);
        controls.threshold();
    };
    threshold.add(controls, 'thresholdValue', 0, 200, 1).name('threshold value');
    let adaptiveThreshold = thresholding.addFolder(filters['adaptiveThreshold']);
    adaptiveThreshold.domElement.onclick = function() {
        closeLastFolder(adaptiveThreshold);
        controls.adaptiveThreshold();
    };
    // Block size must be odd; bump even slider values up by one.
    adaptiveThreshold.add(
        controls, 'adaptiveBlockSize', 3, 99, 1).name('block size').onChange(
        function(value) {
            if (value % 2 === 0) controls.adaptiveBlockSize = value + 1;
        });
    let smoothing = gui.addFolder('Smoothing');
    let gaussianBlur = smoothing.addFolder(filters['gaussianBlur']);
    gaussianBlur.domElement.onclick = function() {
        closeLastFolder(gaussianBlur);
        controls.gaussianBlur();
    };
    // Kernel size must be odd; bump even slider values up by one.
    gaussianBlur.add(
        controls, 'gaussianBlurSize', 7, 99, 1).name('kernel size').onChange(
        function(value) {
            if (value % 2 === 0) controls.gaussianBlurSize = value + 1;
        });
    let medianBlur = smoothing.addFolder(filters['medianBlur']);
    medianBlur.domElement.onclick = function() {
        closeLastFolder(medianBlur);
        controls.medianBlur();
    };
    medianBlur.add(
        controls, 'medianBlurSize', 3, 99, 1).name('kernel size').onChange(
        function(value) {
            if (value % 2 === 0) controls.medianBlurSize = value + 1;
        });
    let bilateralFilter = smoothing.addFolder(filters['bilateralFilter']);
    bilateralFilter.domElement.onclick = function() {
        closeLastFolder(bilateralFilter);
        controls.bilateralFilter();
    };
    bilateralFilter.add(controls, 'bilateralFilterDiameter', 1, 15, 1).name('diameter');
    bilateralFilter.add(controls, 'bilateralFilterSigma', 1, 255, 1).name('sigma');
    let morphology = gui.addFolder('Morphology');
    morphology.domElement.onclick = function() {
        closeLastFolder(morphology);
        controls.morphology();
    };
    // NOTE: dropdown labels map UI strings to OpenCV enum values.
    // (fixed: removed stray trailing space in the 'MORPH_OPEN' label)
    morphology.add(
        controls, 'morphologyOp',
        {'MORPH_ERODE': cv.MORPH_ERODE,
        'MORPH_DILATE': cv.MORPH_DILATE,
        'MORPH_OPEN': cv.MORPH_OPEN,
        'MORPH_CLOSE': cv.MORPH_CLOSE,
        'MORPH_GRADIENT': cv.MORPH_GRADIENT,
        'MORPH_TOPHAT': cv.MORPH_TOPHAT,
        'MORPH_BLACKHAT': cv.MORPH_BLACKHAT}).name('operation');
    morphology.add(
        controls, 'morphologyShape',
        {'MORPH_RECT': cv.MORPH_RECT,
        'MORPH_CROSS': cv.MORPH_CROSS,
        'MORPH_ELLIPSE': cv.MORPH_ELLIPSE}).name('shape');
    morphology.add(
        controls, 'morphologySize', 1, 15, 1).name('kernel size').onChange(
        function(value) {
            if (value % 2 === 0) controls.morphologySize = value + 1;
        });
    // (fixed: label typo 'boarder type' -> 'border type')
    morphology.add(
        controls, 'morphologyBorderType',
        {'BORDER_CONSTANT': cv.BORDER_CONSTANT,
        'BORDER_REPLICATE': cv.BORDER_REPLICATE,
        'BORDER_REFLECT': cv.BORDER_REFLECT,
        'BORDER_REFLECT_101': cv.BORDER_REFLECT_101}).name('border type');
    let gradients = gui.addFolder('Gradients');
    let sobel = gradients.addFolder(filters['sobel']);
    sobel.domElement.onclick = function() {
        closeLastFolder(sobel);
        controls.sobel();
    };
    sobel.add(controls, 'sobelSize', 3, 19, 1).name('kernel size').onChange(function(value) {
        if (value % 2 === 0) controls.sobelSize = value + 1;
    });
    gradients.add(controls, 'scharr').name(filters['scharr']).onChange(function() {
        closeLastFolder(null);
    });
    let laplacian = gradients.addFolder(filters['laplacian']);
    laplacian.domElement.onclick = function() {
        closeLastFolder(laplacian);
        controls.laplacian();
    };
    laplacian.add(
        controls, 'laplacianSize', 1, 19, 1).name('kernel size').onChange(
        function(value) {
            if (value % 2 === 0) controls.laplacianSize = value + 1;
        });
    let canny = gui.addFolder(filters['canny']);
    canny.domElement.onclick = function() {
        closeLastFolder(canny);
        controls.canny();
    };
    canny.add(controls, 'cannyThreshold1', 1, 500, 1).name('threshold1');
    canny.add(controls, 'cannyThreshold2', 1, 500, 1).name('threshold2');
    canny.add(controls, 'cannyApertureSize', 3, 7, 1).name('aperture size').onChange(
        function(value) {
            if (value % 2 === 0) controls.cannyApertureSize = value + 1;
        });
    canny.add(controls, 'cannyL2Gradient').name('l2 gradient');
    let contours = gui.addFolder(filters['contours']);
    contours.domElement.onclick = function() {
        closeLastFolder(contours);
        controls.contours();
    };
    contours.add(
        controls, 'contoursMode',
        {'RETR_EXTERNAL': cv.RETR_EXTERNAL,
        'RETR_LIST': cv.RETR_LIST,
        'RETR_CCOMP': cv.RETR_CCOMP,
        'RETR_TREE': cv.RETR_TREE}).name('mode');
    contours.add(
        controls, 'contoursMethod',
        {'CHAIN_APPROX_NONE': cv.CHAIN_APPROX_NONE,
        'CHAIN_APPROX_SIMPLE': cv.CHAIN_APPROX_SIMPLE,
        'CHAIN_APPROX_TC89_L1': cv.CHAIN_APPROX_TC89_L1,
        'CHAIN_APPROX_TC89_KCOS': cv.CHAIN_APPROX_TC89_KCOS}).name('method');
    let histograms = gui.addFolder('Histograms');
    histograms.add(controls, 'calcHist').name(filters['calcHist']).onChange(function() {
        closeLastFolder(null);
    });
    histograms.add(controls, 'equalizeHist').name(filters['equalizeHist']).onChange(function() {
        closeLastFolder(null);
    });
    let backprojection = histograms.addFolder(filters['backprojection']);
    backprojection.domElement.onclick = function() {
        closeLastFolder(backprojection);
        controls.backprojection();
    };
    backprojection.add(controls, 'backprojectionRangeLow', 0, 255, 1).name('range low');
    backprojection.add(controls, 'backprojectionRangeHigh', 0, 255, 1).name('range high');
}
// Toggle the webcam: begin capture when idle, otherwise shut it down.
function startCamera() {
    if (streaming) {
        utils.stopCamera();
        onVideoStopped();
        return;
    }
    utils.clearError();
    utils.startCamera(resolution, onVideoStarted, 'videoInput');
}
// Called once the camera stream is live: size the <video> element to the
// native stream resolution and kick off the per-frame processing loop.
function onVideoStarted() {
    width = video.videoWidth;
    height = video.videoHeight;
    video.setAttribute('height', height);
    video.setAttribute('width', width);
    streaming = true;
    vc = new cv.VideoCapture(video);
    startVideoProcessing();
}
// Release every work Mat that is still alive so no WASM heap memory leaks.
function stopVideoProcessing() {
    for (let mat of [src, dstC1, dstC3, dstC4]) {
        if (mat != null && !mat.isDeleted()) {
            mat.delete();
        }
    }
}
// Tear down the processing loop and wipe the output canvas.
function onVideoStopped() {
    // Ignore duplicate stop requests.
    if (!streaming) {
        return;
    }
    stopVideoProcessing();
    let outputContext = document.getElementById('canvasOutput').getContext('2d');
    outputContext.clearRect(0, 0, width, height);
    streaming = false;
}
// Boot sequence: once OpenCV.js has finished loading, build the control UI
// and start the camera pipeline.
utils.loadOpenCv(() => {
initUI();
startCamera();
});
</script>
</body>
</html>

View File

@@ -0,0 +1,127 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Intelligent Scissors Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Intelligent Scissors Example</h2>
<p>
Click <b>Start</b> button to launch the code below.<br>
Then click on the image to pick a source point. After that you can hover the mouse pointer over the canvas to specify a target point candidate.<br>
You can change the code in the &lt;textarea&gt; to investigate more. You can choose another image (need to "Stop" first).
</p>
<div>
<div class="control"><button id="tryIt" disabled>Start</button> <button id="stopIt" disabled>Stop</button></div>
<textarea class="code" rows="20" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div id="inputParams">
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
<canvas id="canvasInput"></canvas>
</div>
<div id="result" style="display:none">
<canvas id="canvasOutput"></canvas>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
//cv.resize(src, src, new cv.Size(1024, 1024));
cv.imshow('canvasOutput', src);
let tool = new cv.segmentation_IntelligentScissorsMB();
tool.setEdgeFeatureCannyParameters(32, 100);
tool.setGradientMagnitudeMaxLimit(200);
tool.applyImage(src);
let hasMap = false;
let canvas = document.getElementById('canvasOutput');
canvas.addEventListener('click', e => {
let startX = e.offsetX, startY = e.offsetY; console.log(startX, startY);
if (startX < src.cols && startY < src.rows)
{
console.time('buildMap');
tool.buildMap(new cv.Point(startX, startY));
console.timeEnd('buildMap');
hasMap = true;
}
});
canvas.addEventListener('mousemove', e => {
let x = e.offsetX, y = e.offsetY; //console.log(x, y);
let dst = src.clone();
if (hasMap && x >= 0 && x < src.cols && y >= 0 && y < src.rows)
{
let contour = new cv.Mat();
tool.getContour(new cv.Point(x, y), contour);
let contours = new cv.MatVector();
contours.push_back(contour);
let color = new cv.Scalar(0, 255, 0, 255); // RGBA
cv.polylines(dst, contours, false, color, 1, cv.LINE_8);
contours.delete(); contour.delete();
}
cv.imshow('canvasOutput', dst);
dst.delete();
});
canvas.addEventListener('dispose', e => {
src.delete();
tool.delete();
});
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('lena.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let disposeEvent = new Event('dispose');
let tryIt = document.getElementById('tryIt');
let stopIt = document.getElementById('stopIt');
tryIt.addEventListener('click', () => {
let e_input = document.getElementById('inputParams');
e_input.style.display = 'none';
let e_result = document.getElementById("result")
e_result.style.display = '';
var e = document.getElementById("canvasOutput");
var e_new = e.cloneNode(true);
e.parentNode.replaceChild(e_new, e); // reset event handlers
stopIt.removeAttribute('disabled');
tryIt.setAttribute('disabled', '');
utils.executeCode('codeEditor');
});
stopIt.addEventListener('click', () => {
let e_input = document.getElementById('inputParams');
e_input.style.display = '';
let e_result = document.getElementById("result")
e_result.style.display = 'none';
var e = document.getElementById("canvasOutput");
e.dispatchEvent(disposeEvent);
var e_new = e.cloneNode(true);
e.parentNode.replaceChild(e_new, e); // reset event handlers
tryIt.removeAttribute('disabled');
stopIt.setAttribute('disabled', '');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@@ -0,0 +1,170 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>MeanShift Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>MeanShift Example</h2>
<p>
Click <b>Start/Stop</b> button to start or stop the video.<br>
The <b>videoInput</b> is a &lt;video&gt; element used as meanShift input.
The <b>canvasOutput</b> is a &lt;canvas&gt; element used as meanShift output.<br>
The code of &lt;textarea&gt; will be executed when video is started.
You can modify the code to investigate more.
</p>
<div>
<div class="control"><button id="startAndStop" disabled>Start</button></div>
<textarea class="code" rows="29" cols="100" id="codeEditor" spellcheck="false">
</textarea>
</div>
<p class="err" id="errorMessage"></p>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<video id="videoInput" width="320" height="240" muted loop></video>
</td>
<td>
<canvas id="canvasOutput" width="320" height="240" ></canvas>
</td>
<td></td>
<td></td>
</tr>
<tr>
<td>
<div class="caption">videoInput</div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
<td></td>
<td></td>
</tr>
</table>
</div>
<script src="https://webrtc.github.io/adapter/adapter-5.0.4.js" type="text/javascript"></script>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let video = document.getElementById('videoInput');
let cap = new cv.VideoCapture(video);
// take first frame of the video
let frame = new cv.Mat(video.height, video.width, cv.CV_8UC4);
cap.read(frame);
// hardcode the initial location of window
let trackWindow = new cv.Rect(150, 60, 63, 125);
// set up the ROI for tracking
let roi = frame.roi(trackWindow);
let hsvRoi = new cv.Mat();
cv.cvtColor(roi, hsvRoi, cv.COLOR_RGBA2RGB);
cv.cvtColor(hsvRoi, hsvRoi, cv.COLOR_RGB2HSV);
let mask = new cv.Mat();
let lowScalar = new cv.Scalar(30, 30, 0);
let highScalar = new cv.Scalar(180, 180, 180);
let low = new cv.Mat(hsvRoi.rows, hsvRoi.cols, hsvRoi.type(), lowScalar);
let high = new cv.Mat(hsvRoi.rows, hsvRoi.cols, hsvRoi.type(), highScalar);
cv.inRange(hsvRoi, low, high, mask);
let roiHist = new cv.Mat();
let hsvRoiVec = new cv.MatVector();
hsvRoiVec.push_back(hsvRoi);
cv.calcHist(hsvRoiVec, [0], mask, roiHist, [180], [0, 180]);
cv.normalize(roiHist, roiHist, 0, 255, cv.NORM_MINMAX);
// delete useless mats.
roi.delete(); hsvRoi.delete(); mask.delete(); low.delete(); high.delete(); hsvRoiVec.delete();
// Setup the termination criteria, either 10 iteration or move by at least 1 pt
let termCrit = new cv.TermCriteria(cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 1);
let hsv = new cv.Mat(video.height, video.width, cv.CV_8UC3);
let dst = new cv.Mat();
let hsvVec = new cv.MatVector();
hsvVec.push_back(hsv);
const FPS = 30;
function processVideo() {
try {
if (!streaming) {
// clean and stop.
frame.delete(); dst.delete(); hsvVec.delete(); roiHist.delete(); hsv.delete();
return;
}
let begin = Date.now();
// start processing.
cap.read(frame);
cv.cvtColor(frame, hsv, cv.COLOR_RGBA2RGB);
cv.cvtColor(hsv, hsv, cv.COLOR_RGB2HSV);
cv.calcBackProject(hsvVec, [0], roiHist, dst, [0, 180], 1);
// Apply meanshift to get the new location
// and it also returns number of iterations meanShift took to converge,
// which is useless in this demo.
[, trackWindow] = cv.meanShift(dst, trackWindow, termCrit);
// Draw it on image
let [x, y, w, h] = [trackWindow.x, trackWindow.y, trackWindow.width, trackWindow.height];
cv.rectangle(frame, new cv.Point(x, y), new cv.Point(x+w, y+h), [255, 0, 0, 255], 2);
cv.imshow('canvasOutput', frame);
// schedule the next one.
let delay = 1000/FPS - (Date.now() - begin);
setTimeout(processVideo, delay);
} catch (err) {
utils.printError(err);
}
};
// schedule the first one.
setTimeout(processVideo, 0);
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
let streaming = false;
let videoInput = document.getElementById('videoInput');
let startAndStop = document.getElementById('startAndStop');
let canvasOutput = document.getElementById('canvasOutput');
let canvasContext = canvasOutput.getContext('2d');
startAndStop.addEventListener('click', () => {
if (!streaming) {
utils.clearError();
videoInput.play().then(() => {
onVideoStarted();
});
} else {
videoInput.pause();
videoInput.currentTime = 0;
onVideoStopped();
}
});
function onVideoStarted() {
streaming = true;
startAndStop.innerText = 'Stop';
videoInput.height = videoInput.width * (videoInput.videoHeight / videoInput.videoWidth);
utils.executeCode('codeEditor');
}
function onVideoStopped() {
streaming = false;
canvasContext.clearRect(0, 0, canvasOutput.width, canvasOutput.height);
startAndStop.innerText = 'Start';
}
utils.loadOpenCv(() => {
videoInput.addEventListener('canplay', () => {
startAndStop.removeAttribute('disabled');
});
videoInput.src = 'cup.mp4';
});
</script>
</body>
</html>

View File

@@ -0,0 +1,69 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Black Hat Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Black Hat Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
cv.cvtColor(src, src, cv.COLOR_RGBA2RGB);
let dst = new cv.Mat();
let M = cv.Mat.ones(53, 53, cv.CV_8U);
// You can try more different parameters
cv.morphologyEx(src, dst, cv.MORPH_BLACKHAT, M);
cv.imshow('canvasOutput', dst);
src.delete(); dst.delete(); M.delete();
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('shape.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@@ -0,0 +1,68 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Image Closing Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Image Closing Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let dst = new cv.Mat();
let M = cv.Mat.ones(5, 5, cv.CV_8U);
// You can try more different parameters
cv.morphologyEx(src, dst, cv.MORPH_CLOSE, M);
cv.imshow('canvasOutput', dst);
src.delete(); dst.delete(); M.delete();
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('shape.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@@ -0,0 +1,69 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Image Dilate Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Image Dilate Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let dst = new cv.Mat();
let M = cv.Mat.ones(5, 5, cv.CV_8U);
let anchor = new cv.Point(-1, -1);
// You can try more different parameters
cv.dilate(src, dst, M, anchor, 1, cv.BORDER_CONSTANT, cv.morphologyDefaultBorderValue());
cv.imshow('canvasOutput', dst);
src.delete(); dst.delete(); M.delete();
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('shape.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@@ -0,0 +1,69 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Image Erode Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Image Erode Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let dst = new cv.Mat();
let M = cv.Mat.ones(5, 5, cv.CV_8U);
let anchor = new cv.Point(-1, -1);
// You can try more different parameters
cv.erode(src, dst, M, anchor, 1, cv.BORDER_CONSTANT, cv.morphologyDefaultBorderValue());
cv.imshow('canvasOutput', dst);
src.delete(); dst.delete(); M.delete();
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('shape.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@@ -0,0 +1,71 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Get Structuring Element Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Get Structuring Element Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
cv.cvtColor(src, src, cv.COLOR_RGBA2RGB);
let dst = new cv.Mat();
let M = new cv.Mat();
let ksize = new cv.Size(5, 5);
// You can try more different parameters
M = cv.getStructuringElement(cv.MORPH_CROSS, ksize);
cv.morphologyEx(src, dst, cv.MORPH_GRADIENT, M);
cv.imshow('canvasOutput', dst);
src.delete(); dst.delete(); M.delete();
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('shape.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@@ -0,0 +1,69 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Image Gradient Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Image Gradient Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
cv.cvtColor(src, src, cv.COLOR_RGBA2RGB);
let dst = new cv.Mat();
let M = cv.Mat.ones(5, 5, cv.CV_8U);
// You can try more different parameters
cv.morphologyEx(src, dst, cv.MORPH_GRADIENT, M);
cv.imshow('canvasOutput', dst);
src.delete(); dst.delete(); M.delete();
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('shape.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@@ -0,0 +1,70 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Image Opening Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Image Opening Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let dst = new cv.Mat();
let M = cv.Mat.ones(5, 5, cv.CV_8U);
let anchor = new cv.Point(-1, -1);
// You can try more different parameters
cv.morphologyEx(src, dst, cv.MORPH_OPEN, M, anchor, 1,
cv.BORDER_CONSTANT, cv.morphologyDefaultBorderValue());
cv.imshow('canvasOutput', dst);
src.delete(); dst.delete(); M.delete();
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('shape.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@@ -0,0 +1,69 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Top Hat Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Top Hat Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
cv.cvtColor(src, src, cv.COLOR_RGBA2RGB);
let dst = new cv.Mat();
let M = cv.Mat.ones(9, 9, cv.CV_8U);
// You can try more different parameters
cv.morphologyEx(src, dst, cv.MORPH_TOPHAT, M);
cv.imshow('canvasOutput', dst);
src.delete(); dst.delete(); M.delete();
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('shape.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@@ -0,0 +1,387 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Object Detection Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Object Detection Example</h2>
<p>
This tutorial shows you how to write an object detection example with OpenCV.js.<br>
    To try the example you should click the <b>modelFile</b> button (and the <b>configFile</b> button if needed) to upload the inference model.
You can find the model URLs and parameters in the <a href="#appendix">model info</a> section.
    Then you should change the parameters in the first code snippet according to the uploaded model.
Finally click <b>Try it</b> button to see the result. You can choose any other images.<br>
</p>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput" width="400" height="400"></canvas>
</td>
<td>
<canvas id="canvasOutput" style="visibility: hidden;" width="400" height="400"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">
canvasInput <input type="file" id="fileInput" name="file" accept="image/*">
</div>
</td>
<td>
<p id='status' align="left"></p>
</td>
</tr>
<tr>
<td>
<div class="caption">
modelFile <input type="file" id="modelFile" name="file">
</div>
</td>
</tr>
<tr>
<td>
<div class="caption">
configFile <input type="file" id="configFile">
</div>
</td>
</tr>
</table>
</div>
<div>
<p class="err" id="errorMessage"></p>
</div>
<div>
<h3>Help function</h3>
<p>1.The parameters for model inference which you can modify to investigate more models.</p>
<textarea class="code" rows="15" cols="100" id="codeEditor" spellcheck="false"></textarea>
<p>2.Main loop in which will read the image from canvas and do inference once.</p>
<textarea class="code" rows="16" cols="100" id="codeEditor1" spellcheck="false"></textarea>
<p>3.Load labels from txt file and process it into an array.</p>
<textarea class="code" rows="7" cols="100" id="codeEditor2" spellcheck="false"></textarea>
<p>4.Get blob from image as input for net, and standardize it with <b>mean</b> and <b>std</b>.</p>
<textarea class="code" rows="17" cols="100" id="codeEditor3" spellcheck="false"></textarea>
<p>5.Fetch model file and save to emscripten file system once click the input button.</p>
<textarea class="code" rows="17" cols="100" id="codeEditor4" spellcheck="false"></textarea>
<p>6.The post-processing, including get boxes from output and draw boxes into the image.</p>
<textarea class="code" rows="35" cols="100" id="codeEditor5" spellcheck="false"></textarea>
</div>
<div id="appendix">
<h2>Model Info:</h2>
</div>
<script src="utils.js" type="text/javascript"></script>
<script src="js_dnn_example_helper.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
inputSize = [300, 300];
mean = [127.5, 127.5, 127.5];
std = 0.007843;
swapRB = false;
confThreshold = 0.5;
nmsThreshold = 0.4;
// The type of output, can be YOLO or SSD
outType = "SSD";
// url for label file, can from local or Internet
labelsUrl = "https://raw.githubusercontent.com/opencv/opencv/4.x/samples/data/dnn/object_detection_classes_pascal_voc.txt";
</script>
<script id="codeSnippet1" type="text/code-snippet">
main = async function() {
const labels = await loadLables(labelsUrl);
const input = getBlobFromImage(inputSize, mean, std, swapRB, 'canvasInput');
let net = cv.readNet(configPath, modelPath);
net.setInput(input);
const start = performance.now();
const result = net.forward();
const time = performance.now()-start;
const output = postProcess(result, labels);
updateResult(output, time);
input.delete();
net.delete();
result.delete();
}
</script>
<script id="codeSnippet5" type="text/code-snippet">
postProcess = function(result, labels) {
let canvasOutput = document.getElementById('canvasOutput');
const outputWidth = canvasOutput.width;
const outputHeight = canvasOutput.height;
const resultData = result.data32F;
// Get the boxes(with class and confidence) from the output
let boxes = [];
switch(outType) {
case "YOLO": {
const vecNum = result.matSize[0];
const vecLength = result.matSize[1];
const classNum = vecLength - 5;
for (let i = 0; i < vecNum; ++i) {
let vector = resultData.slice(i*vecLength, (i+1)*vecLength);
let scores = vector.slice(5, vecLength);
let classId = scores.indexOf(Math.max(...scores));
let confidence = scores[classId];
if (confidence > confThreshold) {
let center_x = Math.round(vector[0] * outputWidth);
let center_y = Math.round(vector[1] * outputHeight);
let width = Math.round(vector[2] * outputWidth);
let height = Math.round(vector[3] * outputHeight);
let left = Math.round(center_x - width / 2);
let top = Math.round(center_y - height / 2);
let box = {
scores: scores,
classId: classId,
confidence: confidence,
bounding: [left, top, width, height],
toDraw: true
}
boxes.push(box);
}
}
// NMS(Non Maximum Suppression) algorithm
let boxNum = boxes.length;
let tmp_boxes = [];
let sorted_boxes = [];
for (let c = 0; c < classNum; ++c) {
for (let i = 0; i < boxes.length; ++i) {
tmp_boxes[i] = [boxes[i], i];
}
sorted_boxes = tmp_boxes.sort((a, b) => { return (b[0].scores[c] - a[0].scores[c]); });
for (let i = 0; i < boxNum; ++i) {
if (sorted_boxes[i][0].scores[c] === 0) continue;
else {
for (let j = i + 1; j < boxNum; ++j) {
if (IOU(sorted_boxes[i][0], sorted_boxes[j][0]) >= nmsThreshold) {
boxes[sorted_boxes[j][1]].toDraw = false;
}
}
}
}
}
} break;
case "SSD": {
const vecNum = result.matSize[2];
const vecLength = 7;
for (let i = 0; i < vecNum; ++i) {
let vector = resultData.slice(i*vecLength, (i+1)*vecLength);
let confidence = vector[2];
if (confidence > confThreshold) {
let left, top, right, bottom, width, height;
left = Math.round(vector[3]);
top = Math.round(vector[4]);
right = Math.round(vector[5]);
bottom = Math.round(vector[6]);
width = right - left + 1;
height = bottom - top + 1;
if (width <= 2 || height <= 2) {
left = Math.round(vector[3] * outputWidth);
top = Math.round(vector[4] * outputHeight);
right = Math.round(vector[5] * outputWidth);
bottom = Math.round(vector[6] * outputHeight);
width = right - left + 1;
height = bottom - top + 1;
}
let box = {
classId: vector[1] - 1,
confidence: confidence,
bounding: [left, top, width, height],
toDraw: true
}
boxes.push(box);
}
}
} break;
default:
console.error(`Unsupported output type ${outType}`)
}
// Draw the saved box into the image
let image = cv.imread("canvasInput");
let output = new cv.Mat(outputWidth, outputHeight, cv.CV_8UC3);
cv.cvtColor(image, output, cv.COLOR_RGBA2RGB);
let boxNum = boxes.length;
for (let i = 0; i < boxNum; ++i) {
if (boxes[i].toDraw) {
drawBox(boxes[i]);
}
}
return output;
// Calculate the IOU(Intersection over Union) of two boxes
function IOU(box1, box2) {
let bounding1 = box1.bounding;
let bounding2 = box2.bounding;
let s1 = bounding1[2] * bounding1[3];
let s2 = bounding2[2] * bounding2[3];
let left1 = bounding1[0];
let right1 = left1 + bounding1[2];
let left2 = bounding2[0];
let right2 = left2 + bounding2[2];
let overlapW = calOverlap([left1, right1], [left2, right2]);
let top1 = bounding2[1];
let bottom1 = top1 + bounding1[3];
let top2 = bounding2[1];
let bottom2 = top2 + bounding2[3];
let overlapH = calOverlap([top1, bottom1], [top2, bottom2]);
let overlapS = overlapW * overlapH;
return overlapS / (s1 + s2 + overlapS);
}
// Calculate the overlap range of two vector
function calOverlap(range1, range2) {
let min1 = range1[0];
let max1 = range1[1];
let min2 = range2[0];
let max2 = range2[1];
if (min2 > min1 && min2 < max1) {
return max1 - min2;
} else if (max2 > min1 && max2 < max1) {
return max2 - min1;
} else {
return 0;
}
}
// Draw one predict box into the origin image
function drawBox(box) {
let bounding = box.bounding;
let left = bounding[0];
let top = bounding[1];
let width = bounding[2];
let height = bounding[3];
cv.rectangle(output, new cv.Point(left, top), new cv.Point(left + width, top + height),
new cv.Scalar(0, 255, 0));
cv.rectangle(output, new cv.Point(left, top), new cv.Point(left + width, top + 15),
new cv.Scalar(255, 255, 255), cv.FILLED);
let text = `${labels[box.classId]}: ${box.confidence.toFixed(4)}`;
cv.putText(output, text, new cv.Point(left, top + 10), cv.FONT_HERSHEY_SIMPLEX, 0.3,
new cv.Scalar(0, 0, 0));
}
}
</script>
<script type="text/javascript">
let jsonUrl = "js_object_detection_model_info.json";
drawInfoTable(jsonUrl, 'appendix');
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadCode('codeSnippet1', 'codeEditor1');
let loadLablesCode = 'loadLables = ' + loadLables.toString();
document.getElementById('codeEditor2').value = loadLablesCode;
let getBlobFromImageCode = 'getBlobFromImage = ' + getBlobFromImage.toString();
document.getElementById('codeEditor3').value = getBlobFromImageCode;
let loadModelCode = 'loadModel = ' + loadModel.toString();
document.getElementById('codeEditor4').value = loadModelCode;
utils.loadCode('codeSnippet5', 'codeEditor5');
let canvas = document.getElementById('canvasInput');
let ctx = canvas.getContext('2d');
let img = new Image();
img.crossOrigin = 'anonymous';
img.src = 'lena.png';
img.onload = function() {
ctx.drawImage(img, 0, 0, canvas.width, canvas.height);
};
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
initStatus();
document.getElementById('status').innerHTML = 'Running function main()...';
utils.executeCode('codeEditor');
utils.executeCode('codeEditor1');
if (modelPath === "") {
document.getElementById('status').innerHTML = 'Runing failed.';
utils.printError('Please upload model file by clicking the button first.');
} else {
setTimeout(main, 1);
}
});
let fileInput = document.getElementById('fileInput');
fileInput.addEventListener('change', (e) => {
initStatus();
loadImageToCanvas(e, 'canvasInput');
});
let configPath = "";
let configFile = document.getElementById('configFile');
configFile.addEventListener('change', async (e) => {
initStatus();
configPath = await loadModel(e);
document.getElementById('status').innerHTML = `The config file '${configPath}' is created successfully.`;
});
let modelPath = "";
let modelFile = document.getElementById('modelFile');
modelFile.addEventListener('change', async (e) => {
initStatus();
modelPath = await loadModel(e);
document.getElementById('status').innerHTML = `The model file '${modelPath}' is created successfully.`;
configPath = "";
configFile.value = "";
});
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
var main = async function() {};
var postProcess = function(result, labels) {};
utils.executeCode('codeEditor1');
utils.executeCode('codeEditor2');
utils.executeCode('codeEditor3');
utils.executeCode('codeEditor4');
utils.executeCode('codeEditor5');
function updateResult(output, time) {
try{
let canvasOutput = document.getElementById('canvasOutput');
canvasOutput.style.visibility = "visible";
cv.imshow('canvasOutput', output);
document.getElementById('status').innerHTML = `<b>Model:</b> ${modelPath}<br>
<b>Inference time:</b> ${time.toFixed(2)} ms`;
} catch(e) {
console.log(e);
}
}
function initStatus() {
document.getElementById('status').innerHTML = '';
document.getElementById('canvasOutput').style.visibility = "hidden";
utils.clearError();
}
</script>
</body>
</html>

View File

@@ -0,0 +1,39 @@
{
"caffe": [
{
"model": "mobilenet_SSD",
"inputSize": "300, 300",
"mean": "127.5, 127.5, 127.5",
"std": "0.007843",
"swapRB": "false",
"outType": "SSD",
"labelsUrl": "https://raw.githubusercontent.com/opencv/opencv/4.x/samples/data/dnn/object_detection_classes_pascal_voc.txt",
"modelUrl": "https://raw.githubusercontent.com/chuanqi305/MobileNet-SSD/master/mobilenet_iter_73000.caffemodel",
"configUrl": "https://raw.githubusercontent.com/chuanqi305/MobileNet-SSD/master/deploy.prototxt"
},
{
"model": "VGG_SSD",
"inputSize": "300, 300",
"mean": "104, 117, 123",
"std": "1",
"swapRB": "false",
"outType": "SSD",
"labelsUrl": "https://raw.githubusercontent.com/opencv/opencv/4.x/samples/data/dnn/object_detection_classes_pascal_voc.txt",
"modelUrl": "https://drive.google.com/uc?id=0BzKzrI_SkD1_WVVTSmQxU0dVRzA&export=download",
"configUrl": "https://drive.google.com/uc?id=0BzKzrI_SkD1_WVVTSmQxU0dVRzA&export=download"
}
],
"darknet": [
{
"model": "yolov2_tiny",
"inputSize": "416, 416",
"mean": "0, 0, 0",
"std": "0.00392",
"swapRB": "false",
"outType": "YOLO",
"labelsUrl": "https://raw.githubusercontent.com/opencv/opencv/4.x/samples/data/dnn/object_detection_classes_yolov3.txt",
"modelUrl": "https://pjreddie.com/media/files/yolov2-tiny.weights",
"configUrl": "https://raw.githubusercontent.com/pjreddie/darknet/master/cfg/yolov2-tiny.cfg"
}
]
}

View File

@@ -0,0 +1,402 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Object Detection Example with Camera</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Object Detection Example with Camera </h2>
<p>
This tutorial shows you how to write an object detection example with camera.<br>
    To try the example you should click the <b>modelFile</b> button (and the <b>configFile</b> button if needed) to upload the inference model.
You can find the model URLs and parameters in the <a href="#appendix">model info</a> section.
    Then you should change the parameters in the first code snippet according to the uploaded model.
Finally click <b>Start/Stop</b> button to start or stop the camera capture.<br>
</p>
<div class="control"><button id="startAndStop" disabled>Start</button></div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<video id="videoInput" width="400" height="400"></video>
</td>
<td>
<canvas id="canvasOutput" style="visibility: hidden;" width="400" height="400"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">
videoInput
</div>
</td>
<td>
<p id='status' align="left"></p>
</td>
</tr>
<tr>
<td>
<div class="caption">
modelFile <input type="file" id="modelFile" name="file">
</div>
</td>
</tr>
<tr>
<td>
<div class="caption">
configFile <input type="file" id="configFile">
</div>
</td>
</tr>
</table>
</div>
<div>
<p class="err" id="errorMessage"></p>
</div>
<div>
<h3>Help function</h3>
<p>1.The parameters for model inference which you can modify to investigate more models.</p>
<textarea class="code" rows="15" cols="100" id="codeEditor" spellcheck="false"></textarea>
<p>2.The function to capture video from camera, and the main loop in which will do inference once.</p>
<textarea class="code" rows="34" cols="100" id="codeEditor1" spellcheck="false"></textarea>
<p>3.Load labels from txt file and process it into an array.</p>
<textarea class="code" rows="7" cols="100" id="codeEditor2" spellcheck="false"></textarea>
<p>4.Get blob from image as input for net, and standardize it with <b>mean</b> and <b>std</b>.</p>
<textarea class="code" rows="17" cols="100" id="codeEditor3" spellcheck="false"></textarea>
<p>5.Fetch model file and save to emscripten file system once click the input button.</p>
<textarea class="code" rows="17" cols="100" id="codeEditor4" spellcheck="false"></textarea>
<p>6.The post-processing, including get boxes from output and draw boxes into the image.</p>
<textarea class="code" rows="35" cols="100" id="codeEditor5" spellcheck="false"></textarea>
</div>
<div id="appendix">
<h2>Model Info:</h2>
</div>
<script src="utils.js" type="text/javascript"></script>
<script src="js_dnn_example_helper.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
inputSize = [300, 300];
mean = [127.5, 127.5, 127.5];
std = 0.007843;
swapRB = false;
confThreshold = 0.5;
nmsThreshold = 0.4;
// the type of output, can be YOLO or SSD
outType = "SSD";
// url for label file, can from local or Internet
labelsUrl = "https://raw.githubusercontent.com/opencv/opencv/4.x/samples/data/dnn/object_detection_classes_pascal_voc.txt";
</script>
<script id="codeSnippet1" type="text/code-snippet">
let frame = new cv.Mat(videoInput.height, videoInput.width, cv.CV_8UC4);
let cap = new cv.VideoCapture(videoInput);
main = async function(frame) {
const labels = await loadLables(labelsUrl);
const input = getBlobFromImage(inputSize, mean, std, swapRB, frame);
let net = cv.readNet(configPath, modelPath);
net.setInput(input);
const start = performance.now();
const result = net.forward();
const time = performance.now()-start;
const output = postProcess(result, labels, frame);
updateResult(output, time);
setTimeout(processVideo, 0);
input.delete();
net.delete();
result.delete();
}
function processVideo() {
try {
if (!streaming) {
return;
}
cap.read(frame);
main(frame);
} catch (err) {
utils.printError(err);
}
}
setTimeout(processVideo, 0);
</script>
<script id="codeSnippet5" type="text/code-snippet">
postProcess = function(result, labels, frame) {
let canvasOutput = document.getElementById('canvasOutput');
const outputWidth = canvasOutput.width;
const outputHeight = canvasOutput.height;
const resultData = result.data32F;
// Get the boxes(with class and confidence) from the output
let boxes = [];
switch(outType) {
case "YOLO": {
const vecNum = result.matSize[0];
const vecLength = result.matSize[1];
const classNum = vecLength - 5;
for (let i = 0; i < vecNum; ++i) {
let vector = resultData.slice(i*vecLength, (i+1)*vecLength);
let scores = vector.slice(5, vecLength);
let classId = scores.indexOf(Math.max(...scores));
let confidence = scores[classId];
if (confidence > confThreshold) {
let center_x = Math.round(vector[0] * outputWidth);
let center_y = Math.round(vector[1] * outputHeight);
let width = Math.round(vector[2] * outputWidth);
let height = Math.round(vector[3] * outputHeight);
let left = Math.round(center_x - width / 2);
let top = Math.round(center_y - height / 2);
let box = {
scores: scores,
classId: classId,
confidence: confidence,
bounding: [left, top, width, height],
toDraw: true
}
boxes.push(box);
}
}
// NMS(Non Maximum Suppression) algorithm
let boxNum = boxes.length;
let tmp_boxes = [];
let sorted_boxes = [];
for (let c = 0; c < classNum; ++c) {
for (let i = 0; i < boxes.length; ++i) {
tmp_boxes[i] = [boxes[i], i];
}
sorted_boxes = tmp_boxes.sort((a, b) => { return (b[0].scores[c] - a[0].scores[c]); });
for (let i = 0; i < boxNum; ++i) {
if (sorted_boxes[i][0].scores[c] === 0) continue;
else {
for (let j = i + 1; j < boxNum; ++j) {
if (IOU(sorted_boxes[i][0], sorted_boxes[j][0]) >= nmsThreshold) {
boxes[sorted_boxes[j][1]].toDraw = false;
}
}
}
}
}
} break;
case "SSD": {
const vecNum = result.matSize[2];
const vecLength = 7;
for (let i = 0; i < vecNum; ++i) {
let vector = resultData.slice(i*vecLength, (i+1)*vecLength);
let confidence = vector[2];
if (confidence > confThreshold) {
let left, top, right, bottom, width, height;
left = Math.round(vector[3]);
top = Math.round(vector[4]);
right = Math.round(vector[5]);
bottom = Math.round(vector[6]);
width = right - left + 1;
height = bottom - top + 1;
if (width <= 2 || height <= 2) {
left = Math.round(vector[3] * outputWidth);
top = Math.round(vector[4] * outputHeight);
right = Math.round(vector[5] * outputWidth);
bottom = Math.round(vector[6] * outputHeight);
width = right - left + 1;
height = bottom - top + 1;
}
let box = {
classId: vector[1] - 1,
confidence: confidence,
bounding: [left, top, width, height],
toDraw: true
}
boxes.push(box);
}
}
} break;
default:
console.error(`Unsupported output type ${outType}`)
}
// Draw the saved box into the image
let output = new cv.Mat(outputWidth, outputHeight, cv.CV_8UC3);
cv.cvtColor(frame, output, cv.COLOR_RGBA2RGB);
let boxNum = boxes.length;
for (let i = 0; i < boxNum; ++i) {
if (boxes[i].toDraw) {
drawBox(boxes[i]);
}
}
return output;
// Calculate the IOU(Intersection over Union) of two boxes
function IOU(box1, box2) {
let bounding1 = box1.bounding;
let bounding2 = box2.bounding;
let s1 = bounding1[2] * bounding1[3];
let s2 = bounding2[2] * bounding2[3];
let left1 = bounding1[0];
let right1 = left1 + bounding1[2];
let left2 = bounding2[0];
let right2 = left2 + bounding2[2];
let overlapW = calOverlap([left1, right1], [left2, right2]);
let top1 = bounding2[1];
let bottom1 = top1 + bounding1[3];
let top2 = bounding2[1];
let bottom2 = top2 + bounding2[3];
let overlapH = calOverlap([top1, bottom1], [top2, bottom2]);
let overlapS = overlapW * overlapH;
return overlapS / (s1 + s2 + overlapS);
}
// Calculate the overlap range of two vector
function calOverlap(range1, range2) {
let min1 = range1[0];
let max1 = range1[1];
let min2 = range2[0];
let max2 = range2[1];
if (min2 > min1 && min2 < max1) {
return max1 - min2;
} else if (max2 > min1 && max2 < max1) {
return max2 - min1;
} else {
return 0;
}
}
// Draw one predict box into the origin image
function drawBox(box) {
let bounding = box.bounding;
let left = bounding[0];
let top = bounding[1];
let width = bounding[2];
let height = bounding[3];
cv.rectangle(output, new cv.Point(left, top), new cv.Point(left + width, top + height),
new cv.Scalar(0, 255, 0));
cv.rectangle(output, new cv.Point(left, top), new cv.Point(left + width, top + 15),
new cv.Scalar(255, 255, 255), cv.FILLED);
let text = `${labels[box.classId]}: ${box.confidence.toFixed(4)}`;
cv.putText(output, text, new cv.Point(left, top + 10), cv.FONT_HERSHEY_SIMPLEX, 0.3,
new cv.Scalar(0, 0, 0));
}
}
</script>
<script type="text/javascript">
let jsonUrl = "js_object_detection_model_info.json";
drawInfoTable(jsonUrl, 'appendix');
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadCode('codeSnippet1', 'codeEditor1');
let loadLablesCode = 'loadLables = ' + loadLables.toString();
document.getElementById('codeEditor2').value = loadLablesCode;
let getBlobFromImageCode = 'getBlobFromImage = ' + getBlobFromImage.toString();
document.getElementById('codeEditor3').value = getBlobFromImageCode;
let loadModelCode = 'loadModel = ' + loadModel.toString();
document.getElementById('codeEditor4').value = loadModelCode;
utils.loadCode('codeSnippet5', 'codeEditor5');
let videoInput = document.getElementById('videoInput');
let streaming = false;
let startAndStop = document.getElementById('startAndStop');
startAndStop.addEventListener('click', () => {
if (!streaming) {
utils.clearError();
utils.startCamera('qvga', onVideoStarted, 'videoInput');
} else {
utils.stopCamera();
onVideoStopped();
}
});
let configPath = "";
let configFile = document.getElementById('configFile');
configFile.addEventListener('change', async (e) => {
initStatus();
configPath = await loadModel(e);
document.getElementById('status').innerHTML = `The config file '${configPath}' is created successfully.`;
});
let modelPath = "";
let modelFile = document.getElementById('modelFile');
modelFile.addEventListener('change', async (e) => {
initStatus();
modelPath = await loadModel(e);
document.getElementById('status').innerHTML = `The model file '${modelPath}' is created successfully.`;
configPath = "";
configFile.value = "";
});
utils.loadOpenCv(() => {
startAndStop.removeAttribute('disabled');
});
var main = async function(frame) {};
var postProcess = function(result, labels, frame) {};
utils.executeCode('codeEditor1');
utils.executeCode('codeEditor2');
utils.executeCode('codeEditor3');
utils.executeCode('codeEditor4');
utils.executeCode('codeEditor5');
function onVideoStarted() {
streaming = true;
startAndStop.innerText = 'Stop';
videoInput.width = videoInput.videoWidth;
videoInput.height = videoInput.videoHeight;
utils.executeCode('codeEditor');
utils.executeCode('codeEditor1');
}
function onVideoStopped() {
streaming = false;
startAndStop.innerText = 'Start';
initStatus();
}
function updateResult(output, time) {
try{
let canvasOutput = document.getElementById('canvasOutput');
canvasOutput.style.visibility = "visible";
cv.imshow('canvasOutput', output);
document.getElementById('status').innerHTML = `<b>Model:</b> ${modelPath}<br>
<b>Inference time:</b> ${time.toFixed(2)} ms`;
} catch(e) {
console.log(e);
}
}
function initStatus() {
document.getElementById('status').innerHTML = '';
document.getElementById('canvasOutput').style.visibility = "hidden";
utils.clearError();
}
</script>
</body>
</html>

View File

@@ -0,0 +1,163 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Dense Optical Flow Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Dense Optical Flow Example</h2>
<p>
Click <b>Start/Stop</b> button to start or stop the video.<br>
The <b>videoInput</b> is a &lt;video&gt; element used as input.
The <b>canvasOutput</b> is a &lt;canvas&gt; element used as output.<br>
We get a 2-channel array with optical flow vectors, (u,v). We find their magnitude and direction.
We color code the result for better visualization. Direction corresponds to Hue value of the image.
Magnitude corresponds to Value plane.<br>
The code of &lt;textarea&gt; will be executed when video is started.<br>
You can modify the code to investigate more.
</p>
<div>
<div class="control"><button id="startAndStop" disabled>Start</button></div>
<textarea class="code" rows="29" cols="100" id="codeEditor" spellcheck="false">
</textarea>
</div>
<p class="err" id="errorMessage"></p>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<video id="videoInput" width="320" height="240" muted></video>
</td>
<td>
<canvas id="canvasOutput" width="320" height="240" ></canvas>
</td>
<td></td>
<td></td>
</tr>
<tr>
<td>
<div class="caption">videoInput</div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
<td></td>
<td></td>
</tr>
</table>
</div>
<script src="https://webrtc.github.io/adapter/adapter-5.0.4.js" type="text/javascript"></script>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let video = document.getElementById('videoInput');
let cap = new cv.VideoCapture(video);
// take first frame of the video
let frame1 = new cv.Mat(video.height, video.width, cv.CV_8UC4);
cap.read(frame1);
let prvs = new cv.Mat();
cv.cvtColor(frame1, prvs, cv.COLOR_RGBA2GRAY);
frame1.delete();
let hsv = new cv.Mat();
let hsv0 = new cv.Mat(video.height, video.width, cv.CV_8UC1);
let hsv1 = new cv.Mat(video.height, video.width, cv.CV_8UC1, new cv.Scalar(255));
let hsv2 = new cv.Mat(video.height, video.width, cv.CV_8UC1);
let hsvVec = new cv.MatVector();
hsvVec.push_back(hsv0); hsvVec.push_back(hsv1); hsvVec.push_back(hsv2);
let frame2 = new cv.Mat(video.height, video.width, cv.CV_8UC4);
let next = new cv.Mat(video.height, video.width, cv.CV_8UC1);
let flow = new cv.Mat(video.height, video.width, cv.CV_32FC2);
let flowVec = new cv.MatVector();
let mag = new cv.Mat(video.height, video.width, cv.CV_32FC1);
let ang = new cv.Mat(video.height, video.width, cv.CV_32FC1);
let rgb = new cv.Mat(video.height, video.width, cv.CV_8UC3);
const FPS = 30;
function processVideo() {
try {
if (!streaming) {
// clean and stop.
prvs.delete(); hsv.delete(); hsv0.delete(); hsv1.delete(); hsv2.delete();
hsvVec.delete(); frame2.delete(); flow.delete(); flowVec.delete(); next.delete();
mag.delete(); ang.delete(); rgb.delete();
return;
}
let begin = Date.now();
// start processing.
cap.read(frame2);
cv.cvtColor(frame2, next, cv.COLOR_RGBA2GRAY);
cv.calcOpticalFlowFarneback(prvs, next, flow, 0.5, 3, 15, 3, 5, 1.2, 0);
cv.split(flow, flowVec);
let u = flowVec.get(0);
let v = flowVec.get(1);
cv.cartToPolar(u, v, mag, ang);
u.delete(); v.delete();
ang.convertTo(hsv0, cv.CV_8UC1, 180/Math.PI/2);
cv.normalize(mag, hsv2, 0, 255, cv.NORM_MINMAX, cv.CV_8UC1);
cv.merge(hsvVec, hsv);
cv.cvtColor(hsv, rgb, cv.COLOR_HSV2RGB);
cv.imshow('canvasOutput', rgb);
next.copyTo(prvs);
// schedule the next one.
let delay = 1000/FPS - (Date.now() - begin);
setTimeout(processVideo, delay);
} catch (err) {
utils.printError(err);
}
};
// schedule the first one.
setTimeout(processVideo, 0);
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
let streaming = false;
let videoInput = document.getElementById('videoInput');
let startAndStop = document.getElementById('startAndStop');
startAndStop.addEventListener('click', () => {
if (!streaming) {
utils.clearError();
videoInput.play().then(() => {
onVideoStarted();
});
} else {
videoInput.pause();
videoInput.currentTime = 0;
onVideoStopped();
}
});
function onVideoStarted() {
streaming = true;
startAndStop.innerText = 'Stop';
videoInput.height = videoInput.width * (videoInput.videoHeight / videoInput.videoWidth);
utils.executeCode('codeEditor');
}
function onVideoStopped() {
streaming = false;
startAndStop.innerText = 'Start';
}
videoInput.addEventListener('ended', () => {
onVideoStopped();
});
utils.loadOpenCv(() => {
videoInput.addEventListener('canplay', () => {
startAndStop.removeAttribute('disabled');
});
videoInput.src = 'box.mp4';
});
</script>
</body>
</html>

View File

@@ -0,0 +1,190 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Lucas-Kanade Optical Flow Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Lucas-Kanade Optical Flow Example</h2>
<p>
Click <b>Start/Stop</b> button to start or stop the video.<br>
The <b>videoInput</b> is a &lt;video&gt; element used as input.
The <b>canvasOutput</b> is a &lt;canvas&gt; element used as output.<br>
To decide the points, we use <b>cv.goodFeaturesToTrack()</b>. We take the first frame, detect some Shi-Tomasi corner points in it, then we iteratively track those points using <b>cv.calcOpticalFlowPyrLK</b>.<br>
The code of &lt;textarea&gt; will be executed when video is started.<br>
You can modify the code to investigate more.
</p>
<div>
<div class="control"><button id="startAndStop" disabled>Start</button></div>
<textarea class="code" rows="29" cols="100" id="codeEditor" spellcheck="false">
</textarea>
</div>
<p class="err" id="errorMessage"></p>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<video id="videoInput" width="320" height="240" muted></video>
</td>
<td>
<canvas id="canvasOutput" width="320" height="240" ></canvas>
</td>
<td></td>
<td></td>
</tr>
<tr>
<td>
<div class="caption">videoInput</div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
<td></td>
<td></td>
</tr>
</table>
</div>
<script src="https://webrtc.github.io/adapter/adapter-5.0.4.js" type="text/javascript"></script>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
// Lucas-Kanade sparse optical flow demo: tracks Shi-Tomasi corners from
// frame to frame and draws the motion trails on top of the video.
let video = document.getElementById('videoInput');
let cap = new cv.VideoCapture(video);
// parameters for ShiTomasi corner detection
let [maxCorners, qualityLevel, minDistance, blockSize] = [30, 0.3, 7, 7];
// parameters for lucas kanade optical flow
let winSize = new cv.Size(15, 15);
let maxLevel = 2;
let criteria = new cv.TermCriteria(cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 0.03);
// create some random colors, one per tracked corner
let color = [];
for (let i = 0; i < maxCorners; i++) {
    color.push(new cv.Scalar(parseInt(Math.random()*255), parseInt(Math.random()*255),
                             parseInt(Math.random()*255), 255));
}
// take first frame and find corners in it
let oldFrame = new cv.Mat(video.height, video.width, cv.CV_8UC4);
cap.read(oldFrame);
let oldGray = new cv.Mat();
// frames read from cv.VideoCapture are CV_8UC4 (RGBA), so use
// COLOR_RGBA2GRAY here — consistent with the per-frame conversion below
// (the original used COLOR_RGB2GRAY for this one conversion)
cv.cvtColor(oldFrame, oldGray, cv.COLOR_RGBA2GRAY);
let p0 = new cv.Mat();
let none = new cv.Mat();
cv.goodFeaturesToTrack(oldGray, p0, maxCorners, qualityLevel, minDistance, none, blockSize);
// Create a mask image for drawing purposes
let zeroEle = new cv.Scalar(0, 0, 0, 255);
let mask = new cv.Mat(oldFrame.rows, oldFrame.cols, oldFrame.type(), zeroEle);
let frame = new cv.Mat(video.height, video.width, cv.CV_8UC4);
let frameGray = new cv.Mat();
let p1 = new cv.Mat();
let st = new cv.Mat();
let err = new cv.Mat();
const FPS = 30;
function processVideo() {
    try {
        if (!streaming) {
            // clean and stop: release every Mat allocated above
            // (the original leaked frameGray, st, none and oldFrame)
            frame.delete(); oldFrame.delete(); oldGray.delete(); frameGray.delete();
            p0.delete(); p1.delete(); st.delete(); err.delete();
            mask.delete(); none.delete();
            return;
        }
        let begin = Date.now();
        // start processing.
        cap.read(frame);
        cv.cvtColor(frame, frameGray, cv.COLOR_RGBA2GRAY);
        // calculate optical flow
        cv.calcOpticalFlowPyrLK(oldGray, frameGray, p0, p1, st, err, winSize, maxLevel, criteria);
        // select good points: st.data[i] === 1 means point i was tracked
        let goodNew = [];
        let goodOld = [];
        for (let i = 0; i < st.rows; i++) {
            if (st.data[i] === 1) {
                goodNew.push(new cv.Point(p1.data32F[i*2], p1.data32F[i*2+1]));
                goodOld.push(new cv.Point(p0.data32F[i*2], p0.data32F[i*2+1]));
            }
        }
        // draw the tracks
        for (let i = 0; i < goodNew.length; i++) {
            cv.line(mask, goodNew[i], goodOld[i], color[i], 2);
            cv.circle(frame, goodNew[i], 5, color[i], -1);
        }
        cv.add(frame, mask, frame);
        cv.imshow('canvasOutput', frame);
        // now update the previous frame and previous points
        frameGray.copyTo(oldGray);
        p0.delete(); p0 = null;
        p0 = new cv.Mat(goodNew.length, 1, cv.CV_32FC2);
        for (let i = 0; i < goodNew.length; i++) {
            p0.data32F[i*2] = goodNew[i].x;
            p0.data32F[i*2+1] = goodNew[i].y;
        }
        // schedule the next frame, compensating for processing time
        let delay = 1000/FPS - (Date.now() - begin);
        setTimeout(processVideo, delay);
    } catch (e) {
        // renamed from `err` to avoid shadowing the status-error Mat above
        utils.printError(e);
    }
}
// schedule the first one.
setTimeout(processVideo, 0);
</script>
<script type="text/javascript">
// Page wiring for the optical-flow demo. Note: `streaming` and `utils`
// are read by the eval'd code snippet above, so their names must not change.
let utils = new Utils('errorMessage');
// copy the demo source from the hidden snippet into the editable textarea
utils.loadCode('codeSnippet', 'codeEditor');
let streaming = false;
let videoInput = document.getElementById('videoInput');
let startAndStop = document.getElementById('startAndStop');
// Single button toggles between playing the video (and running the demo
// code) and stopping/rewinding it.
startAndStop.addEventListener('click', () => {
    if (!streaming) {
        utils.clearError();
        videoInput.play().then(() => {
            onVideoStarted();
        });
    } else {
        videoInput.pause();
        videoInput.currentTime = 0;
        onVideoStopped();
    }
});
// Marks the stream as live, resizes the element to keep the video's
// aspect ratio, and evaluates the (possibly user-edited) demo code.
function onVideoStarted() {
    streaming = true;
    startAndStop.innerText = 'Stop';
    videoInput.height = videoInput.width * (videoInput.videoHeight / videoInput.videoWidth);
    utils.executeCode('codeEditor');
}
// Marks the stream as stopped; the snippet's processVideo loop checks
// `streaming` and cleans up on its next tick.
function onVideoStopped() {
    streaming = false;
    startAndStop.innerText = 'Start';
}
videoInput.addEventListener('ended', () => {
    onVideoStopped();
});
// Enable the start button only once OpenCV.js is loaded and the video
// element can actually play.
utils.loadOpenCv(() => {
    videoInput.addEventListener('canplay', () => {
        startAndStop.removeAttribute('disabled');
    });
    videoInput.src = 'box.mp4';
});
</script>
</body>
</html>

View File

@@ -0,0 +1,327 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Pose Estimation Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Pose Estimation Example</h2>
<p>
This tutorial shows you how to write a pose estimation example with OpenCV.js.<br>
To try the example you should click the <b>modelFile</b> button (and the <b>configFile</b> button if needed) to upload the inference model.
You can find the model URLs and parameters in the <a href="#appendix">model info</a> section.
Then you should change the parameters in the first code snippet according to the uploaded model.
Finally, click the <b>Try it</b> button to see the result. You can choose any other images.<br>
</p>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput" width="400" height="250"></canvas>
</td>
<td>
<canvas id="canvasOutput" style="visibility: hidden;" width="400" height="250"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">
canvasInput <input type="file" id="fileInput" name="file" accept="image/*">
</div>
</td>
<td>
<p id='status' align="left"></p>
</td>
</tr>
<tr>
<td>
<div class="caption">
modelFile <input type="file" id="modelFile" name="file">
</div>
</td>
</tr>
<tr>
<td>
<div class="caption">
configFile <input type="file" id="configFile">
</div>
</td>
</tr>
</table>
</div>
<div>
<p class="err" id="errorMessage"></p>
</div>
<div>
<h3>Help function</h3>
<p>1.The parameters for model inference which you can modify to investigate more models.</p>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false"></textarea>
<p>2.Main loop in which will read the image from canvas and do inference once.</p>
<textarea class="code" rows="15" cols="100" id="codeEditor1" spellcheck="false"></textarea>
<p>3.Get blob from image as input for net, and standardize it with <b>mean</b> and <b>std</b>.</p>
<textarea class="code" rows="17" cols="100" id="codeEditor2" spellcheck="false"></textarea>
<p>4.Fetch model file and save to emscripten file system once click the input button.</p>
<textarea class="code" rows="17" cols="100" id="codeEditor3" spellcheck="false"></textarea>
<p>5.The pairs of keypoints of different dataset.</p>
<textarea class="code" rows="30" cols="100" id="codeEditor4" spellcheck="false"></textarea>
<p>6.The post-processing, including get the predicted points and draw lines into the image.</p>
<textarea class="code" rows="30" cols="100" id="codeEditor5" spellcheck="false"></textarea>
</div>
<div id="appendix">
<h2>Model Info:</h2>
</div>
<script src="utils.js" type="text/javascript"></script>
<script src="js_dnn_example_helper.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
// Network input parameters; change these to match the uploaded model
// (defaults fit the OpenPose Caffe models listed in the model info table).
inputSize = [368, 368]; // blob width/height fed to the network
mean = [0, 0, 0];       // per-channel mean subtracted from the input
std = 0.00392;          // scale factor (1/255) applied to pixel values
swapRB = false;         // whether to swap the R and B channels
threshold = 0.1;        // minimum heat-map confidence to accept a keypoint
// the pairs of keypoint, can be "COCO", "MPI" and "BODY_25"
dataset = "COCO";
</script>
<script id="codeSnippet1" type="text/code-snippet">
// Runs one inference pass: grabs the image from canvasInput, forwards it
// through the network, post-processes the heat-maps and shows the result.
// Relies on page globals defined by the other snippets: inputSize/mean/std/
// swapRB (parameters), configPath/modelPath (file inputs), postProcess.
main = async function() {
    const input = getBlobFromImage(inputSize, mean, std, swapRB, 'canvasInput');
    let net = cv.readNet(configPath, modelPath);
    net.setInput(input);
    // time only the forward pass
    const start = performance.now();
    const result = net.forward();
    const time = performance.now()-start;
    const output = postProcess(result);
    updateResult(output, time);
    // release the OpenCV objects allocated in this pass
    input.delete();
    net.delete();
    result.delete();
}
</script>
<script id="codeSnippet4" type="text/code-snippet">
// Keypoint layout for the chosen dataset.
// BODY_PARTS maps part name -> heat-map channel index in the network output;
// POSE_PAIRS lists the limb segments to draw between detected parts.
BODY_PARTS = {};
POSE_PAIRS = [];
if (dataset === 'COCO') {
    // COCO layout: 18 keypoints plus a background channel
    BODY_PARTS = { "Nose": 0, "Neck": 1, "RShoulder": 2, "RElbow": 3, "RWrist": 4,
                   "LShoulder": 5, "LElbow": 6, "LWrist": 7, "RHip": 8, "RKnee": 9,
                   "RAnkle": 10, "LHip": 11, "LKnee": 12, "LAnkle": 13, "REye": 14,
                   "LEye": 15, "REar": 16, "LEar": 17, "Background": 18 };
    POSE_PAIRS = [ ["Neck", "RShoulder"], ["Neck", "LShoulder"], ["RShoulder", "RElbow"],
                   ["RElbow", "RWrist"], ["LShoulder", "LElbow"], ["LElbow", "LWrist"],
                   ["Neck", "RHip"], ["RHip", "RKnee"], ["RKnee", "RAnkle"], ["Neck", "LHip"],
                   ["LHip", "LKnee"], ["LKnee", "LAnkle"], ["Neck", "Nose"], ["Nose", "REye"],
                   ["REye", "REar"], ["Nose", "LEye"], ["LEye", "LEar"] ]
} else if (dataset === 'MPI') {
    // MPI layout: 15 keypoints plus a background channel
    BODY_PARTS = { "Head": 0, "Neck": 1, "RShoulder": 2, "RElbow": 3, "RWrist": 4,
                   "LShoulder": 5, "LElbow": 6, "LWrist": 7, "RHip": 8, "RKnee": 9,
                   "RAnkle": 10, "LHip": 11, "LKnee": 12, "LAnkle": 13, "Chest": 14,
                   "Background": 15 }
    POSE_PAIRS = [ ["Head", "Neck"], ["Neck", "RShoulder"], ["RShoulder", "RElbow"],
                   ["RElbow", "RWrist"], ["Neck", "LShoulder"], ["LShoulder", "LElbow"],
                   ["LElbow", "LWrist"], ["Neck", "Chest"], ["Chest", "RHip"], ["RHip", "RKnee"],
                   ["RKnee", "RAnkle"], ["Chest", "LHip"], ["LHip", "LKnee"], ["LKnee", "LAnkle"] ]
} else if (dataset === 'BODY_25') {
    // BODY_25 layout: 25 keypoints (adds feet and MidHip) plus background
    BODY_PARTS = { "Nose": 0, "Neck": 1, "RShoulder": 2, "RElbow": 3, "RWrist": 4,
                   "LShoulder": 5, "LElbow": 6, "LWrist": 7, "MidHip": 8, "RHip": 9,
                   "RKnee": 10, "RAnkle": 11, "LHip": 12, "LKnee": 13, "LAnkle": 14,
                   "REye": 15, "LEye": 16, "REar": 17, "LEar": 18, "LBigToe": 19,
                   "LSmallToe": 20, "LHeel": 21, "RBigToe": 22, "RSmallToe": 23,
                   "RHeel": 24, "Background": 25 }
    POSE_PAIRS = [ ["Neck", "Nose"], ["Neck", "RShoulder"],
                   ["Neck", "LShoulder"], ["RShoulder", "RElbow"],
                   ["RElbow", "RWrist"], ["LShoulder", "LElbow"],
                   ["LElbow", "LWrist"], ["Nose", "REye"],
                   ["REye", "REar"], ["Nose", "LEye"],
                   ["LEye", "LEar"], ["Neck", "MidHip"],
                   ["MidHip", "RHip"], ["RHip", "RKnee"],
                   ["RKnee", "RAnkle"], ["RAnkle", "RBigToe"],
                   ["RBigToe", "RSmallToe"], ["RAnkle", "RHeel"],
                   ["MidHip", "LHip"], ["LHip", "LKnee"],
                   ["LKnee", "LAnkle"], ["LAnkle", "LBigToe"],
                   ["LBigToe", "LSmallToe"], ["LAnkle", "LHeel"] ]
}
</script>
<script id="codeSnippet5" type="text/code-snippet">
// Converts the network's heat-map output into an image with the detected
// skeleton drawn on top of the canvasInput image.
// `result` is a 1 x C x H x W blob (data32F), one heat-map channel per body
// part; BODY_PARTS / POSE_PAIRS / threshold are globals set by the other
// snippets on this page.
postProcess = function(result) {
    const resultData = result.data32F;
    const matSize = result.matSize;
    const size2 = matSize[2]; // heat-map height
    const size3 = matSize[3]; // heat-map width
    const mapSize = size2 * size3;
    let canvasOutput = document.getElementById('canvasOutput');
    const outputWidth = canvasOutput.width;
    const outputHeight = canvasOutput.height;
    let image = cv.imread("canvasInput");
    // cv.Mat takes (rows, cols): rows correspond to the canvas height
    // (the original swapped them; harmless only because cvtColor
    // reallocates the destination, but fixed for correctness)
    let output = new cv.Mat(outputHeight, outputWidth, cv.CV_8UC3);
    cv.cvtColor(image, output, cv.COLOR_RGBA2RGB);
    // find the highest-confidence location in each part's heat-map
    let points = [];
    for (let i = 0; i < Object.keys(BODY_PARTS).length; ++i) {
        const heatMap = resultData.slice(i*mapSize, (i+1)*mapSize);
        // numeric loop instead of for...in (which yields string indices)
        let maxIndex = 0;
        for (let index = 1; index < heatMap.length; ++index) {
            if (heatMap[index] > heatMap[maxIndex]) {
                maxIndex = index;
            }
        }
        if (heatMap[maxIndex] > threshold) {
            const indexX = maxIndex % size3;
            // fixed: the row index must be an integer; without Math.floor
            // keypoints were shifted by a fractional-row offset
            const indexY = Math.floor(maxIndex / size3);
            const x = outputWidth * indexX / size3;
            const y = outputHeight * indexY / size2;
            points[i] = [Math.round(x), Math.round(y)];
        }
    }
    // draw the limb segments (green lines) and keypoints (red dots)
    for (const pair of POSE_PAIRS) {
        const partFrom = pair[0];
        const partTo = pair[1];
        const idFrom = BODY_PARTS[partFrom];
        const idTo = BODY_PARTS[partTo];
        const pointFrom = points[idFrom];
        const pointTo = points[idTo];
        // only draw a limb when both of its endpoints were detected
        if (pointFrom && pointTo) {
            cv.line(output, new cv.Point(pointFrom[0], pointFrom[1]),
                    new cv.Point(pointTo[0], pointTo[1]), new cv.Scalar(0, 255, 0), 3);
            cv.ellipse(output, new cv.Point(pointFrom[0], pointFrom[1]), new cv.Size(3, 3), 0, 0, 360,
                       new cv.Scalar(0, 0, 255), cv.FILLED);
            cv.ellipse(output, new cv.Point(pointTo[0], pointTo[1]), new cv.Size(3, 3), 0, 0, 360,
                       new cv.Scalar(0, 0, 255), cv.FILLED);
        }
    }
    image.delete(); // release the intermediate RGBA image (was leaked)
    return output;  // caller takes ownership of `output`
}
</script>
<script type="text/javascript">
// Page wiring for the pose estimation demo.
let jsonUrl = "js_pose_estimation_model_info.json";
drawInfoTable(jsonUrl, 'appendix');
let utils = new Utils('errorMessage');
// populate the editable code areas from the snippets / helper sources
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadCode('codeSnippet1', 'codeEditor1');
let getBlobFromImageCode = 'getBlobFromImage = ' + getBlobFromImage.toString();
document.getElementById('codeEditor2').value = getBlobFromImageCode;
let loadModelCode = 'loadModel = ' + loadModel.toString();
document.getElementById('codeEditor3').value = loadModelCode;
utils.loadCode('codeSnippet4', 'codeEditor4');
utils.loadCode('codeSnippet5', 'codeEditor5');
// draw the default demo image onto the input canvas
let canvas = document.getElementById('canvasInput');
let ctx = canvas.getContext('2d');
let img = new Image();
img.crossOrigin = 'anonymous';
img.src = 'roi.jpg';
img.onload = function() {
    ctx.drawImage(img, 0, 0, canvas.width, canvas.height);
};
// "Try it": re-evaluate the user-editable snippets, then run main()
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
    initStatus();
    document.getElementById('status').innerHTML = 'Running function main()...';
    utils.executeCode('codeEditor');
    utils.executeCode('codeEditor1');
    if (modelPath === "") {
        // fixed typo in user-facing message (was 'Runing failed.')
        document.getElementById('status').innerHTML = 'Running failed.';
        utils.printError('Please upload model file by clicking the button first.');
    } else {
        setTimeout(main, 1);
    }
});
let fileInput = document.getElementById('fileInput');
fileInput.addEventListener('change', (e) => {
    initStatus();
    loadImageToCanvas(e, 'canvasInput');
});
// Uploading a config / model file stores the resulting path for cv.readNet().
let configPath = "";
let configFile = document.getElementById('configFile');
configFile.addEventListener('change', async (e) => {
    initStatus();
    configPath = await loadModel(e);
    document.getElementById('status').innerHTML = `The config file '${configPath}' is created successfully.`;
});
let modelPath = "";
let modelFile = document.getElementById('modelFile');
modelFile.addEventListener('change', async (e) => {
    initStatus();
    modelPath = await loadModel(e);
    document.getElementById('status').innerHTML = `The model file '${modelPath}' is created successfully.`;
    // a new model invalidates any previously uploaded config
    configPath = "";
    configFile.value = "";
});
utils.loadOpenCv(() => {
    tryIt.removeAttribute('disabled');
});
// placeholders so the snippet evaluation below can overwrite them
var main = async function() {};
var postProcess = function(result) {};
utils.executeCode('codeEditor');
utils.executeCode('codeEditor1');
utils.executeCode('codeEditor2');
utils.executeCode('codeEditor3');
utils.executeCode('codeEditor4');
utils.executeCode('codeEditor5');
// Shows the (resized) result image and the inference time.
function updateResult(output, time) {
    try {
        let canvasOutput = document.getElementById('canvasOutput');
        canvasOutput.style.visibility = "visible";
        // cv.Mat takes (rows, cols); rows correspond to the canvas height
        // (the original swapped width/height — harmless only because
        // cv.resize reallocates dst, but fixed for correctness)
        let resized = new cv.Mat(canvasOutput.height, canvasOutput.width, cv.CV_8UC4);
        cv.resize(output, resized, new cv.Size(canvasOutput.width, canvasOutput.height));
        cv.imshow('canvasOutput', resized);
        resized.delete(); // release the temporary Mat (was leaked)
        // NOTE(review): `output` itself is never released by main() — confirm ownership
        document.getElementById('status').innerHTML = `<b>Model:</b> ${modelPath}<br>
            <b>Inference time:</b> ${time.toFixed(2)} ms`;
    } catch(e) {
        console.log(e);
    }
}
// Clears the status line, hides the output canvas and clears any error.
function initStatus() {
    document.getElementById('status').innerHTML = '';
    document.getElementById('canvasOutput').style.visibility = "hidden";
    utils.clearError();
}
</script>
</body>
</html>

View File

@@ -0,0 +1,34 @@
{
"caffe": [
{
"model": "body_25",
"inputSize": "368, 368",
"mean": "0, 0, 0",
"std": "0.00392",
"swapRB": "false",
"dataset": "BODY_25",
"modelUrl": "http://posefs1.perception.cs.cmu.edu/OpenPose/models/pose/body_25/pose_iter_584000.caffemodel",
"configUrl": "https://raw.githubusercontent.com/CMU-Perceptual-Computing-Lab/openpose/master/models/pose/body_25/pose_deploy.prototxt"
},
{
"model": "coco",
"inputSize": "368, 368",
"mean": "0, 0, 0",
"std": "0.00392",
"swapRB": "false",
"dataset": "COCO",
"modelUrl": "http://posefs1.perception.cs.cmu.edu/OpenPose/models/pose/coco/pose_iter_440000.caffemodel",
"configUrl": "https://raw.githubusercontent.com/CMU-Perceptual-Computing-Lab/openpose/master/models/pose/coco/pose_deploy_linevec.prototxt"
},
{
"model": "mpi",
"inputSize": "368, 368",
"mean": "0, 0, 0",
"std": "0.00392",
"swapRB": "false",
"dataset": "MPI",
"modelUrl": "http://posefs1.perception.cs.cmu.edu/OpenPose/models/pose/mpi/pose_iter_160000.caffemodel",
"configUrl": "https://raw.githubusercontent.com/CMU-Perceptual-Computing-Lab/openpose/master/models/pose/mpi/pose_deploy_linevec.prototxt"
}
]
}

View File

@@ -0,0 +1,67 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Image PyrDown Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Image PyrDown Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
// Read the input image, downsample it one pyramid level, and display it.
let input = cv.imread('canvasInput');
let halved = new cv.Mat();
// Try different parameters here to investigate more.
cv.pyrDown(input, halved, new cv.Size(0, 0), cv.BORDER_DEFAULT);
cv.imshow('canvasOutput', halved);
// Release the Mats backing both images.
input.delete();
halved.delete();
</script>
<script type="text/javascript">
// Page wiring: load the demo code and default image, then enable the
// "Try it" button once OpenCV.js has finished loading.
const helper = new Utils('errorMessage');
helper.loadCode('codeSnippet', 'codeEditor');
helper.loadImageToCanvas('lena.jpg', 'canvasInput');
helper.addFileInputHandler('fileInput', 'canvasInput');
const tryItButton = document.getElementById('tryIt');
tryItButton.addEventListener('click', function () {
    helper.executeCode('codeEditor');
});
helper.loadOpenCv(function () {
    tryItButton.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@@ -0,0 +1,67 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Image PyrUp Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Image PyrUp Example</h2>
<p>
&lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
Click <b>Try it</b> button to see the result. You can choose another image.<br>
You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
// Read the input image, upsample it one pyramid level, and display it.
let input = cv.imread('canvasInput');
let upsampled = new cv.Mat();
// Try different parameters here to investigate more.
cv.pyrUp(input, upsampled, new cv.Size(0, 0), cv.BORDER_DEFAULT);
cv.imshow('canvasOutput', upsampled);
// Release the Mats backing both images.
input.delete();
upsampled.delete();
</script>
<script type="text/javascript">
// Page wiring: load the demo code and default image, then enable the
// "Try it" button once OpenCV.js has finished loading.
const helper = new Utils('errorMessage');
helper.loadCode('codeSnippet', 'codeEditor');
helper.loadImageToCanvas('lena.jpg', 'canvasInput');
helper.addFileInputHandler('fileInput', 'canvasInput');
const tryItButton = document.getElementById('tryIt');
tryItButton.addEventListener('click', function () {
    helper.executeCode('codeEditor');
});
helper.loadOpenCv(function () {
    tryItButton.removeAttribute('disabled');
});
</script>
</body>
</html>

View File

@@ -0,0 +1,243 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Semantic Segmentation Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Semantic Segmentation Example</h2>
<p>
This tutorial shows you how to write a semantic segmentation example with OpenCV.js.<br>
To try the example you should click the <b>modelFile</b> button (and the <b>configFile</b> button if needed) to upload the inference model.
You can find the model URLs and parameters in the <a href="#appendix">model info</a> section.
Then you should change the parameters in the first code snippet according to the uploaded model.
Finally, click the <b>Try it</b> button to see the result. You can choose any other images.<br>
</p>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput" width="400" height="400"></canvas>
</td>
<td>
<canvas id="canvasOutput" style="visibility: hidden;" width="400" height="400"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">
canvasInput <input type="file" id="fileInput" name="file" accept="image/*">
</div>
</td>
<td>
<p id='status' align="left"></p>
</td>
</tr>
<tr>
<td>
<div class="caption">
modelFile <input type="file" id="modelFile" name="file">
</div>
</td>
</tr>
<tr>
<td>
<div class="caption">
configFile <input type="file" id="configFile">
</div>
</td>
</tr>
</table>
</div>
<div>
<p class="err" id="errorMessage"></p>
</div>
<div>
<h3>Help function</h3>
<p>1.The parameters for model inference which you can modify to investigate more models.</p>
<textarea class="code" rows="5" cols="100" id="codeEditor" spellcheck="false"></textarea>
<p>2.Main loop in which will read the image from canvas and do inference once.</p>
<textarea class="code" rows="16" cols="100" id="codeEditor1" spellcheck="false"></textarea>
<p>3.Get blob from image as input for net, and standardize it with <b>mean</b> and <b>std</b>.</p>
<textarea class="code" rows="17" cols="100" id="codeEditor2" spellcheck="false"></textarea>
<p>4.Fetch model file and save to emscripten file system once click the input button.</p>
<textarea class="code" rows="17" cols="100" id="codeEditor3" spellcheck="false"></textarea>
<p>5. The post-processing, which generates colors for the different classes and takes the argmax to get the class for each pixel.</p>
<textarea class="code" rows="34" cols="100" id="codeEditor4" spellcheck="false"></textarea>
</div>
<div id="appendix">
<h2>Model Info:</h2>
</div>
<script src="utils.js" type="text/javascript"></script>
<script src="js_dnn_example_helper.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
// Network input parameters; change these to match the uploaded model.
inputSize = [513, 513];       // blob width/height fed to the network
mean = [127.5, 127.5, 127.5]; // per-channel mean subtracted from the input
std = 0.007843;               // scale factor (~1/127.5) applied to pixel values
swapRB = false;               // whether to swap the R and B channels
</script>
<script id="codeSnippet1" type="text/code-snippet">
// Runs one inference pass: grabs the image from canvasInput, forwards it
// through the network, converts the per-class scores into a color image,
// and shows the result. Relies on page globals defined by the other
// snippets: inputSize/mean/std/swapRB, configPath/modelPath,
// generateColors and argmax.
main = async function() {
    const input = getBlobFromImage(inputSize, mean, std, swapRB, 'canvasInput');
    let net = cv.readNet(configPath, modelPath);
    net.setInput(input);
    // time only the forward pass
    const start = performance.now();
    const result = net.forward();
    const time = performance.now()-start;
    const colors = generateColors(result);
    const output = argmax(result, colors);
    updateResult(output, time);
    // release the OpenCV objects allocated in this pass
    input.delete();
    net.delete();
    result.delete();
}
</script>
<script id="codeSnippet4" type="text/code-snippet">
generateColors = function(result) {
const numClasses = result.matSize[1];
let colors = [0,0,0];
while(colors.length < numClasses*3){
colors.push(Math.round((Math.random()*255 + colors[colors.length-3]) / 2));
}
return colors;
}
argmax = function(result, colors) {
const C = result.matSize[1];
const H = result.matSize[2];
const W = result.matSize[3];
const resultData = result.data32F;
const imgSize = H*W;
let classId = [];
for (i = 0; i<imgSize; ++i) {
let id = 0;
for (j = 0; j < C; ++j) {
if (resultData[j*imgSize+i] > resultData[id*imgSize+i]) {
id = j;
}
}
classId.push(colors[id*3]);
classId.push(colors[id*3+1]);
classId.push(colors[id*3+2]);
classId.push(255);
}
output = cv.matFromArray(H,W,cv.CV_8UC4,classId);
return output;
}
</script>
<script type="text/javascript">
// Page wiring for the semantic segmentation demo.
let jsonUrl = "js_semantic_segmentation_model_info.json";
drawInfoTable(jsonUrl, 'appendix');
let utils = new Utils('errorMessage');
// populate the editable code areas from the snippets / helper sources
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadCode('codeSnippet1', 'codeEditor1');
let getBlobFromImageCode = 'getBlobFromImage = ' + getBlobFromImage.toString();
document.getElementById('codeEditor2').value = getBlobFromImageCode;
let loadModelCode = 'loadModel = ' + loadModel.toString();
document.getElementById('codeEditor3').value = loadModelCode;
utils.loadCode('codeSnippet4', 'codeEditor4');
// draw the default demo image onto the input canvas
let canvas = document.getElementById('canvasInput');
let ctx = canvas.getContext('2d');
let img = new Image();
img.crossOrigin = 'anonymous';
img.src = 'roi.jpg';
img.onload = function() {
    ctx.drawImage(img, 0, 0, canvas.width, canvas.height);
};
// "Try it": re-evaluate the user-editable snippets, then run main()
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
    initStatus();
    document.getElementById('status').innerHTML = 'Running function main()...';
    utils.executeCode('codeEditor');
    utils.executeCode('codeEditor1');
    if (modelPath === "") {
        // fixed typo in user-facing message (was 'Runing failed.')
        document.getElementById('status').innerHTML = 'Running failed.';
        utils.printError('Please upload model file by clicking the button first.');
    } else {
        setTimeout(main, 1);
    }
});
let fileInput = document.getElementById('fileInput');
fileInput.addEventListener('change', (e) => {
    initStatus();
    loadImageToCanvas(e, 'canvasInput');
});
// Uploading a config / model file stores the resulting path for cv.readNet().
let configPath = "";
let configFile = document.getElementById('configFile');
configFile.addEventListener('change', async (e) => {
    initStatus();
    configPath = await loadModel(e);
    document.getElementById('status').innerHTML = `The config file '${configPath}' is created successfully.`;
});
let modelPath = "";
let modelFile = document.getElementById('modelFile');
modelFile.addEventListener('change', async (e) => {
    initStatus();
    modelPath = await loadModel(e);
    document.getElementById('status').innerHTML = `The model file '${modelPath}' is created successfully.`;
    // a new model invalidates any previously uploaded config
    configPath = "";
    configFile.value = "";
});
utils.loadOpenCv(() => {
    tryIt.removeAttribute('disabled');
});
// placeholders so the snippet evaluation below can overwrite them
var main = async function() {};
var generateColors = function(result) {};
var argmax = function(result, colors) {};
utils.executeCode('codeEditor1');
utils.executeCode('codeEditor2');
utils.executeCode('codeEditor3');
utils.executeCode('codeEditor4');
// Shows the (resized) result image and the inference time.
function updateResult(output, time) {
    try {
        let canvasOutput = document.getElementById('canvasOutput');
        canvasOutput.style.visibility = "visible";
        // cv.Mat takes (rows, cols); rows correspond to the canvas height
        // (the original swapped width/height — harmless only because
        // cv.resize reallocates dst, but fixed for correctness)
        let resized = new cv.Mat(canvasOutput.height, canvasOutput.width, cv.CV_8UC4);
        cv.resize(output, resized, new cv.Size(canvasOutput.width, canvasOutput.height));
        cv.imshow('canvasOutput', resized);
        resized.delete(); // release the temporary Mat (was leaked)
        // NOTE(review): `output` itself is never released by main() — confirm ownership
        document.getElementById('status').innerHTML = `<b>Model:</b> ${modelPath}<br>
            <b>Inference time:</b> ${time.toFixed(2)} ms`;
    } catch(e) {
        console.log(e);
    }
}
// Clears the status line, hides the output canvas and clears any error.
function initStatus() {
    document.getElementById('status').innerHTML = '';
    document.getElementById('canvasOutput').style.visibility = "hidden";
    utils.clearError();
}
</script>
</body>
</html>

Some files were not shown because too many files have changed in this diff Show More