diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..7c54bccd81b69f45b53501bd1d2e5c7b853e5e1d
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,14 @@
+FROM python:3.9
+
+WORKDIR /code
+
+COPY ./requirements.txt /code/requirements.txt
+
+RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt
+
+COPY . .
+
+# Serve the Flask app defined in classify_webcam.py on the port the Space expects
+ENV FLASK_APP=classify_webcam.py
+
+CMD ["flask", "run", "--host=0.0.0.0", "--port=7860"]
diff --git a/README.md b/README.md
index eb36f9eaf35db9c8058b992839eb1b33acc9d157..fe41e01108b0447980d238c190f27ba6e3bcf810 100644
--- a/README.md
+++ b/README.md
@@ -9,3 +9,135 @@ license: apache-2.0
 ---
 
 Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+
+
+# Sign Language Image Recognition System - Web App
+
+An image recognition system developed with Python and ML libraries to detect sign language gestures. It is connected to a web application built with Flask and was developed for an academic project.
+
+Explore the project »
+
+## Table of Contents
+
+1. About The Project
+2. Getting Started
+3. Usage
+4. Contributing
+5. Contributors
+6. License
+7. Contact
+
+
+
+## About The Project
+
+[![Product Name Screen Shot][product-screenshot]]
+

(back to top)

+
+
+### Built With
+
+* [Python]
+* [OpenCV]
+* [TensorFlow]
+* [Numpy]
+* [Flask]
+* [HTML]
+* [CSS]
+* [JavaScript]
+

(back to top)

+
+
+## Getting Started
+
+To get a local copy up and running, follow these steps.
+
+### Prerequisites
+
+This project is built with Python. You need Python 3 and pip installed in order to install the required packages.
+
+### Installation
+
+4. Train the model
+```
+python3 train.py \
+  --bottleneck_dir=logs/bottlenecks \
+  --how_many_training_steps=2000 \
+  --model_dir=inception \
+  --summaries_dir=logs/training_summaries/basic \
+  --output_graph=logs/trained_graph.pb \
+  --output_labels=logs/trained_labels.txt \
+  --image_dir=./dataset
+```
+5. Run the web application
+```
+python3 classify_webcam.py
+```
+

(back to top)
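After training, the exported graph can be sanity-checked on a single image without starting the web app. The sketch below mirrors the bundled classify.py, but loads TensorFlow through the tensorflow.compat.v1 API (as classify_webcam.py does) so it runs under the TensorFlow 2.8 pin in requirements.txt; the graph and label paths are the ones produced by the training command above, and the image path is whatever is passed on the command line.

```
import sys
import tensorflow.compat.v1 as tf

# TF1-style graph loading, matching classify_webcam.py
tf.disable_v2_behavior()

# Paths written by train.py in the Installation step above
labels = [line.rstrip() for line in tf.gfile.GFile("logs/trained_labels.txt")]
with tf.gfile.GFile("logs/trained_graph.pb", "rb") as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())
    tf.import_graph_def(graph_def, name="")

# Classify the JPEG given as the first command-line argument
image_data = tf.gfile.GFile(sys.argv[1], "rb").read()
with tf.Session() as sess:
    softmax = sess.graph.get_tensor_by_name("final_result:0")
    preds = sess.run(softmax, {"DecodeJpeg/contents:0": image_data})[0]
    for i in preds.argsort()[::-1]:
        print("%s (score = %.5f)" % (labels[i], preds[i]))
```

Saved under any name (for example a hypothetical classify_image.py), it can be run as `python3 classify_image.py path/to/image.jpg`.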

+
+
+## Usage
+
+This project can be used to recognize sign language gestures: the web page sends a captured image to the Flask backend, which returns the predicted label for the gesture.
+

(back to top)
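The same prediction that the page triggers can be exercised directly against the Flask `/image` route, which expects a JSON body whose `image_data` field holds a base64 data URL (see classify_webcam.py later in this diff). A minimal client sketch, assuming the app was started locally with `python3 classify_webcam.py` (Flask's default port 5000), a sample file test.jpg, and the requests package, which is not part of requirements.txt:

```
import base64
import requests  # client-side helper only; not listed in requirements.txt

# Build the same payload shape the web page sends: a JPEG as a base64 data URL
with open("test.jpg", "rb") as f:  # assumed sample image
    data_url = "data:image/jpeg;base64," + base64.b64encode(f.read()).decode()

resp = requests.post("http://127.0.0.1:5000/image", json={"image_data": data_url})
print(resp.json())  # e.g. {"status": 1, "value": "A"}
```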

+
+
+## Contributing
+
+Contributions are what make the open source community such an amazing place to learn, inspire, and create. Any contributions you make are **greatly appreciated**.
+
+If you have a suggestion that would make this better, please fork the repo and create a pull request. You can also simply open an issue with the tag "enhancement".
+Don't forget to give the project a star! Thanks again!
+
+1. Fork the Project
+2. Create your Feature Branch (`git checkout -b feature/AmazingFeature`)
+3. Commit your Changes (`git commit -m 'Add some AmazingFeature'`)
+4. Push to the Branch (`git push origin feature/AmazingFeature`)
+5. Open a Pull Request
+

(back to top)

+ + + +## Contributors + diff --git a/classify.py b/classify.py new file mode 100644 index 0000000000000000000000000000000000000000..e87550283bf8f025e96a6fc288ca6c8c1566dfce --- /dev/null +++ b/classify.py @@ -0,0 +1,39 @@ +import tensorflow as tf +import sys +import os + + +# Disable tensorflow compilation warnings +os.environ['TF_CPP_MIN_LOG_LEVEL']='2' +import tensorflow as tf + +image_path = sys.argv[1] + +# Read the image_data +image_data = tf.gfile.FastGFile(image_path, 'rb').read() + + +# Loads label file, strips off carriage return +label_lines = [line.rstrip() for line + in tf.gfile.GFile("logs/output_labels.txt")] + +# Unpersists graph from file +with tf.gfile.FastGFile("logs/output_graph.pb", 'rb') as f: + graph_def = tf.GraphDef() + graph_def.ParseFromString(f.read()) + _ = tf.import_graph_def(graph_def, name='') + +with tf.Session() as sess: + # Feed the image_data as input to the graph and get first prediction + softmax_tensor = sess.graph.get_tensor_by_name('final_result:0') + + predictions = sess.run(softmax_tensor, \ + {'DecodeJpeg/contents:0': image_data}) + + # Sort to show labels of first prediction in order of confidence + top_k = predictions[0].argsort()[-len(predictions[0]):][::-1] + + for node_id in top_k: + human_string = label_lines[node_id] + score = predictions[0][node_id] + print('%s (score = %.5f)' % (human_string, score)) diff --git a/classify_webcam.py b/classify_webcam.py new file mode 100644 index 0000000000000000000000000000000000000000..6640abfccd24de49bb1c314d1e1973629b7becd3 --- /dev/null +++ b/classify_webcam.py @@ -0,0 +1,145 @@ +import sys +import os + +import flask +import matplotlib +import numpy as np +import matplotlib.pyplot as plt +import copy +import cv2 +import random +import tensorflow.compat.v1 as tf +tf.disable_v2_behavior() + +from re import I +from flask import Flask, render_template, request, redirect, url_for, flash, jsonify +from flask_cors import CORS, cross_origin +from flask import send_from_directory +import base64 +from PIL import Image +from io import BytesIO + +app = Flask(__name__) +cors = CORS(app) +app.config['CORS_HEADERS'] = 'Content-Type' + +# Disable tensorflow compilation warnings +os.environ['TF_CPP_MIN_LOG_LEVEL']='2' +#import tensorflow as tf + +def predict(image_data): + + predictions = sess.run(softmax_tensor, \ + {'DecodeJpeg/contents:0': image_data}) + + # Sort to show labels of first prediction in order of confidence + top_k = predictions[0].argsort()[-len(predictions[0]):][::-1] + + max_score = 0.0 + res = '' + for node_id in top_k: + human_string = label_lines[node_id] + score = predictions[0][node_id] + if score > max_score: + max_score = score + res = human_string + return res, max_score + +# Loads label file, strips off carriage return +label_lines = [line.rstrip() for line + in tf.gfile.GFile("logs/trained_labels.txt")] + +# Unpersists graph from file +with tf.gfile.FastGFile("logs/trained_graph.pb", 'rb') as f: + graph_def = tf.GraphDef() + graph_def.ParseFromString(f.read()) + _ = tf.import_graph_def(graph_def, name='') + + +sess = tf.Session() + # Feed the image_data as input to the graph and get first prediction +softmax_tensor = sess.graph.get_tensor_by_name('final_result:0') + +def imageRead (random_name): + c = 0 + global sess + global softmax_tensor + + #cap = cv2.VideoCapture(0) + + res, score = '', 0.0 + i = 0 + mem = '' + consecutive = 0 + sequence = '' + + while True: + img = cv2.imread('temp_img/'+random_name) + img = cv2.flip(img, 1) + + + #x1, y1, x2, y2 = 200, 200, 600, 600 + #img_cropped 
= img[y1:y2, x1:x2] + + c += 1 + image_data = cv2.imencode('.jpg', img)[1].tostring() + + a = cv2.waitKey(1) # waits to see if `esc` is pressed + + res_tmp, score = predict(image_data) + res = res_tmp + + print(res) + return res; + #cv2.putText(img, '%s' % (res.upper()), (100,400), cv2.FONT_HERSHEY_SIMPLEX, 4, (255,255,255), 4) + #cv2.putText(img, '(score = %.5f)' % (float(score)), (100,450), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255)) + #mem = res + #cv2.rectangle(img, (x1, y1), (x2, y2), (255,0,0), 2) + #cv2.imshow("img", img) + #img_sequence = np.zeros((200,1200,3), np.uint8) + #cv2.putText(img_sequence, '%s' % (sequence.upper()), (30,30), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255), 2) + #cv2.imshow('sequence', img_sequence) + + #if a == 27: # when `esc` is pressed + # break + +@app.route('/image', methods=['GET', 'POST']) +@cross_origin() +def image(): + req = request.get_json() + random_name = "test" + '.jpg' + image_data = req['image_data'].split(',')[1] + im = Image.open(BytesIO(base64.b64decode(image_data))) + im.save('temp_img/'+random_name, 'JPEG') + + imageData = imageRead(random_name) + return '{"status":1, "value": "'+imageData+'"}'; + +@app.route('/') +@cross_origin() +def homePage(): + return render_template('index.html') + +@app.route("/audio/") +def static_dir(path): + return flask.send_file("templates/audio/" + path) + +@app.route('/image-upload', methods=['GET', 'POST']) +@cross_origin() +def imageUpload(): + req = request.get_json() + random_name = str( random.randint(1, 9999999) )+ '.jpg' + image_data = req['image_data'].split(',')[1] + im = Image.open(BytesIO(base64.b64decode(image_data))) + im.save('temp_img/'+random_name, 'JPEG') + + imageData = imageRead(random_name) + return '{"status":1, "value": "'+imageData+'"}'; + + +if __name__ == '__main__': + app.run(debug=True) + +# Following line should... <-- This should work fine now +# cv2.destroyAllWindows() +# cv2.VideoCapture(0).release() \ No newline at end of file diff --git a/images/logo.png b/images/logo.png new file mode 100644 index 0000000000000000000000000000000000000000..0f38ba9369899ff527b5b81b77c7518185f8a18a Binary files /dev/null and b/images/logo.png differ diff --git a/images/screenshot.png b/images/screenshot.png new file mode 100644 index 0000000000000000000000000000000000000000..2fca9c672ee7c406e2b982ce047a65fd69f36829 Binary files /dev/null and b/images/screenshot.png differ diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..d18e5773ca5219709887717ccd591a3ad33b8d71 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,9 @@ +requirements + +> wheel = 0.37.1 +> flask = 2.1.2 +> flask-cors = 3.0.10 +> matplotlib = 3.5.2 +> numpy = 1.22.3 +> opencv-python-headless = 4.5.5.64 +> tensorflow==2.8.0 \ No newline at end of file diff --git a/static/main.css b/static/main.css new file mode 100644 index 0000000000000000000000000000000000000000..b20a43861c82fb83f18fd28c64cded4328ee2c01 --- /dev/null +++ b/static/main.css @@ -0,0 +1,2327 @@ +/* Generated on: Tue Apr 19 2022 21:22:13 GMT+0000 (Coordinated Universal Time) */ +/* ========================================================================== + normalize.css v3.0.3 | MIT License | github.com/necolas/normalize.css + ========================================================================== */ +/*! 
normalize.css v3.0.3 | MIT License | github.com/necolas/normalize.css */ +html { + font-family: sans-serif; + -ms-text-size-adjust: 100%; + -webkit-text-size-adjust: 100%; +} +body { + margin: 0; +} +article, +aside, +details, +figcaption, +figure, +footer, +header, +hgroup, +main, +menu, +nav, +section, +summary { + display: block; +} +audio, +canvas, +progress, +video { + display: inline-block; + vertical-align: baseline; +} +audio:not([controls]) { + display: none; + height: 0; +} +[hidden], +template { + display: none; +} +a { + background-color: transparent; +} +a:active, +a:hover { + outline: 0; +} +abbr[title] { + border-bottom: 1px dotted; +} +b, +strong { + font-weight: bold; +} +dfn { + font-style: italic; +} +h1 { + font-size: 2em; + margin: 0.67em 0; +} +mark { + background: #ff0; + color: #000; +} +small { + font-size: 80%; +} +sub, +sup { + font-size: 75%; + line-height: 0; + position: relative; + vertical-align: baseline; +} +sup { + top: -0.5em; +} +sub { + bottom: -0.25em; +} +img { + border: 0; +} +svg:not(:root) { + overflow: hidden; +} +figure { + margin: 1em 40px; +} +hr { + box-sizing: content-box; + height: 0; +} +pre { + overflow: auto; +} +code, +kbd, +pre, +samp { + font-family: monospace, monospace; + font-size: 1em; +} +button, +input, +optgroup, +select, +textarea { + color: inherit; + font: inherit; + margin: 0; +} +button { + overflow: visible; +} +button, +select { + text-transform: none; +} +button, +html input[type="button"], +input[type="reset"] { + -webkit-appearance: button; + cursor: pointer; +} +button[disabled], +html input[disabled] { + cursor: default; +} +button::-moz-focus-inner, +input::-moz-focus-inner { + border: 0; + padding: 0; +} +input { + line-height: normal; +} +input[type="checkbox"], +input[type="radio"] { + box-sizing: border-box; + padding: 0; +} +input[type="number"]::-webkit-inner-spin-button, +input[type="number"]::-webkit-outer-spin-button { + height: auto; +} +input[type="search"] { + -webkit-appearance: none; +} +input[type="search"]::-webkit-search-cancel-button, +input[type="search"]::-webkit-search-decoration { + -webkit-appearance: none; +} +fieldset { + border: 1px solid #c0c0c0; + margin: 0 2px; + padding: 0.35em 0.625em 0.75em; +} +legend { + border: 0; + padding: 0; +} +textarea { + overflow: auto; +} +optgroup { + font-weight: bold; +} +table { + border-collapse: collapse; + border-spacing: 0; +} +td, +th { + padding: 0; +} + +/* ========================================================================== + Start of base Webflow CSS - If you're looking for some ultra-clean CSS, skip the boilerplate and see the unminified code below. 
+ ========================================================================== */ +@font-face { + font-family: "webflow-icons"; + src: url("data:application/x-font-ttf;charset=utf-8;base64,AAEAAAALAIAAAwAwT1MvMg8SBiUAAAC8AAAAYGNtYXDpP+a4AAABHAAAAFxnYXNwAAAAEAAAAXgAAAAIZ2x5ZmhS2XEAAAGAAAADHGhlYWQTFw3HAAAEnAAAADZoaGVhCXYFgQAABNQAAAAkaG10eCe4A1oAAAT4AAAAMGxvY2EDtALGAAAFKAAAABptYXhwABAAPgAABUQAAAAgbmFtZSoCsMsAAAVkAAABznBvc3QAAwAAAAAHNAAAACAAAwP4AZAABQAAApkCzAAAAI8CmQLMAAAB6wAzAQkAAAAAAAAAAAAAAAAAAAABEAAAAAAAAAAAAAAAAAAAAABAAADpAwPA/8AAQAPAAEAAAAABAAAAAAAAAAAAAAAgAAAAAAADAAAAAwAAABwAAQADAAAAHAADAAEAAAAcAAQAQAAAAAwACAACAAQAAQAg5gPpA//9//8AAAAAACDmAOkA//3//wAB/+MaBBcIAAMAAQAAAAAAAAAAAAAAAAABAAH//wAPAAEAAAAAAAAAAAACAAA3OQEAAAAAAQAAAAAAAAAAAAIAADc5AQAAAAABAAAAAAAAAAAAAgAANzkBAAAAAAEBIAAAAyADgAAFAAAJAQcJARcDIP5AQAGA/oBAAcABwED+gP6AQAABAOAAAALgA4AABQAAEwEXCQEH4AHAQP6AAYBAAcABwED+gP6AQAAAAwDAAOADQALAAA8AHwAvAAABISIGHQEUFjMhMjY9ATQmByEiBh0BFBYzITI2PQE0JgchIgYdARQWMyEyNj0BNCYDIP3ADRMTDQJADRMTDf3ADRMTDQJADRMTDf3ADRMTDQJADRMTAsATDSANExMNIA0TwBMNIA0TEw0gDRPAEw0gDRMTDSANEwAAAAABAJ0AtAOBApUABQAACQIHCQEDJP7r/upcAXEBcgKU/usBFVz+fAGEAAAAAAL//f+9BAMDwwAEAAkAABcBJwEXAwE3AQdpA5ps/GZsbAOabPxmbEMDmmz8ZmwDmvxmbAOabAAAAgAA/8AEAAPAAB0AOwAABSInLgEnJjU0Nz4BNzYzMTIXHgEXFhUUBw4BBwYjNTI3PgE3NjU0Jy4BJyYjMSIHDgEHBhUUFx4BFxYzAgBqXV6LKCgoKIteXWpqXV6LKCgoKIteXWpVSktvICEhIG9LSlVVSktvICEhIG9LSlVAKCiLXl1qal1eiygoKCiLXl1qal1eiygoZiEgb0tKVVVKS28gISEgb0tKVVVKS28gIQABAAABwAIAA8AAEgAAEzQ3PgE3NjMxFSIHDgEHBhUxIwAoKIteXWpVSktvICFmAcBqXV6LKChmISBvS0pVAAAAAgAA/8AFtgPAADIAOgAAARYXHgEXFhUUBw4BBwYHIxUhIicuAScmNTQ3PgE3NjMxOAExNDc+ATc2MzIXHgEXFhcVATMJATMVMzUEjD83NlAXFxYXTjU1PQL8kz01Nk8XFxcXTzY1PSIjd1BQWlJJSXInJw3+mdv+2/7c25MCUQYcHFg5OUA/ODlXHBwIAhcXTzY1PTw1Nk8XF1tQUHcjIhwcYUNDTgL+3QFt/pOTkwABAAAAAQAAmM7nP18PPPUACwQAAAAAANciZKUAAAAA1yJkpf/9/70FtgPDAAAACAACAAAAAAAAAAEAAAPA/8AAAAW3//3//QW2AAEAAAAAAAAAAAAAAAAAAAAMBAAAAAAAAAAAAAAAAgAAAAQAASAEAADgBAAAwAQAAJ0EAP/9BAAAAAQAAAAFtwAAAAAAAAAKABQAHgAyAEYAjACiAL4BFgE2AY4AAAABAAAADAA8AAMAAAAAAAIAAAAAAAAAAAAAAAAAAAAAAAAADgCuAAEAAAAAAAEADQAAAAEAAAAAAAIABwCWAAEAAAAAAAMADQBIAAEAAAAAAAQADQCrAAEAAAAAAAUACwAnAAEAAAAAAAYADQBvAAEAAAAAAAoAGgDSAAMAAQQJAAEAGgANAAMAAQQJAAIADgCdAAMAAQQJAAMAGgBVAAMAAQQJAAQAGgC4AAMAAQQJAAUAFgAyAAMAAQQJAAYAGgB8AAMAAQQJAAoANADsd2ViZmxvdy1pY29ucwB3AGUAYgBmAGwAbwB3AC0AaQBjAG8AbgBzVmVyc2lvbiAxLjAAVgBlAHIAcwBpAG8AbgAgADEALgAwd2ViZmxvdy1pY29ucwB3AGUAYgBmAGwAbwB3AC0AaQBjAG8AbgBzd2ViZmxvdy1pY29ucwB3AGUAYgBmAGwAbwB3AC0AaQBjAG8AbgBzUmVndWxhcgBSAGUAZwB1AGwAYQByd2ViZmxvdy1pY29ucwB3AGUAYgBmAGwAbwB3AC0AaQBjAG8AbgBzRm9udCBnZW5lcmF0ZWQgYnkgSWNvTW9vbi4ARgBvAG4AdAAgAGcAZQBuAGUAcgBhAHQAZQBkACAAYgB5ACAASQBjAG8ATQBvAG8AbgAuAAAAAwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==") + format("truetype"); + font-weight: normal; + font-style: normal; +} +[class^="w-icon-"], +[class*=" w-icon-"] { + font-family: "webflow-icons" !important; + speak: none; + font-style: normal; + font-weight: normal; + font-variant: normal; + text-transform: none; + line-height: 1; + -webkit-font-smoothing: antialiased; + -moz-osx-font-smoothing: grayscale; +} +.w-icon-slider-right:before { + content: "\e600"; +} +.w-icon-slider-left:before { + content: "\e601"; +} +.w-icon-nav-menu:before { + content: "\e602"; +} +.w-icon-arrow-down:before, +.w-icon-dropdown-toggle:before { + content: "\e603"; +} +.w-icon-file-upload-remove:before { + content: "\e900"; +} +.w-icon-file-upload-icon:before { + content: "\e903"; +} +* { + -webkit-box-sizing: border-box; + -moz-box-sizing: border-box; + box-sizing: border-box; +} +html { + height: 100%; +} +body { + 
margin: 0; + min-height: 100%; + background-color: #fff; + font-family: Arial, sans-serif; + font-size: 14px; + line-height: 20px; + color: #333; +} +img { + max-width: 100%; + vertical-align: middle; + display: inline-block; +} +html.w-mod-touch * { + background-attachment: scroll !important; +} +.w-block { + display: block; +} +.w-inline-block { + max-width: 100%; + display: inline-block; +} +.w-clearfix:before, +.w-clearfix:after { + content: " "; + display: table; + grid-column-start: 1; + grid-row-start: 1; + grid-column-end: 2; + grid-row-end: 2; +} +.w-clearfix:after { + clear: both; +} + +.w-button { + display: inline-block; + padding: 9px 15px; + background-color: #3898ec; + color: white; + border: 0; + line-height: inherit; + text-decoration: none; + cursor: pointer; + border-radius: 0; +} +input.w-button { + -webkit-appearance: button; +} +html[data-w-dynpage] [data-w-cloak] { + color: transparent !important; +} +.w-webflow-badge, +.w-webflow-badge * { + position: static; + left: auto; + top: auto; + right: auto; + bottom: auto; + z-index: auto; + display: block; + visibility: visible; + overflow: visible; + overflow-x: visible; + overflow-y: visible; + box-sizing: border-box; + width: auto; + height: auto; + max-height: none; + max-width: none; + min-height: 0; + min-width: 0; + margin: 0; + padding: 0; + float: none; + clear: none; + border: 0 none transparent; + border-radius: 0; + background: none; + background-image: none; + background-position: 0 0; + background-size: auto auto; + background-repeat: repeat; + background-origin: padding-box; + background-clip: border-box; + background-attachment: scroll; + background-color: transparent; + box-shadow: none; + opacity: 1; + transform: none; + transition: none; + direction: ltr; + font-family: inherit; + font-weight: inherit; + color: inherit; + font-size: inherit; + line-height: inherit; + font-style: inherit; + font-variant: inherit; + text-align: inherit; + letter-spacing: inherit; + text-decoration: inherit; + text-indent: 0; + text-transform: inherit; + list-style-type: disc; + text-shadow: none; + font-smoothing: auto; + vertical-align: baseline; + cursor: inherit; + white-space: inherit; + word-break: normal; + word-spacing: normal; + word-wrap: normal; +} +.w-webflow-badge { + position: fixed !important; + display: inline-block !important; + visibility: visible !important; + z-index: 2147483647 !important; + top: auto !important; + right: 12px !important; + bottom: 12px !important; + left: auto !important; + color: #aaadb0 !important; + background-color: #fff !important; + border-radius: 3px !important; + padding: 6px 8px 6px 6px !important; + font-size: 12px !important; + opacity: 1 !important; + line-height: 14px !important; + text-decoration: none !important; + transform: none !important; + margin: 0 !important; + width: auto !important; + height: auto !important; + overflow: visible !important; + white-space: nowrap; + box-shadow: 0 0 0 1px rgba(0, 0, 0, 0.1), 0 1px 3px rgba(0, 0, 0, 0.1); + cursor: pointer; +} +.w-webflow-badge > img { + display: inline-block !important; + visibility: visible !important; + opacity: 1 !important; + vertical-align: middle !important; +} +h1, +h2, +h3, +h4, +h5, +h6 { + font-weight: bold; + margin-bottom: 10px; +} +h1 { + font-size: 38px; + line-height: 44px; + margin-top: 20px; +} +h2 { + font-size: 32px; + line-height: 36px; + margin-top: 20px; +} +h3 { + font-size: 24px; + line-height: 30px; + margin-top: 20px; +} +h4 { + font-size: 18px; + line-height: 24px; + margin-top: 10px; 
+} +h5 { + font-size: 14px; + line-height: 20px; + margin-top: 10px; +} +h6 { + font-size: 12px; + line-height: 18px; + margin-top: 10px; +} +p { + margin-top: 0; + margin-bottom: 10px; +} +blockquote { + margin: 0 0 10px 0; + padding: 10px 20px; + border-left: 5px solid #e2e2e2; + font-size: 18px; + line-height: 22px; +} +figure { + margin: 0; + margin-bottom: 10px; +} +figcaption { + margin-top: 5px; + text-align: center; +} +ul, +ol { + margin-top: 0; + margin-bottom: 10px; + padding-left: 40px; +} +.w-list-unstyled { + padding-left: 0; + list-style: none; +} +.w-embed:before, +.w-embed:after { + content: " "; + display: table; + grid-column-start: 1; + grid-row-start: 1; + grid-column-end: 2; + grid-row-end: 2; +} +.w-embed:after { + clear: both; +} +.w-video { + width: 100%; + position: relative; + padding: 0; +} +.w-video iframe, +.w-video object, +.w-video embed { + position: absolute; + top: 0; + left: 0; + width: 100%; + height: 100%; + border: none; +} +fieldset { + padding: 0; + margin: 0; + border: 0; +} +button, +html input[type="button"], +input[type="reset"] { + border: 0; + cursor: pointer; + -webkit-appearance: button; +} +.w-form { + margin: 0 0 15px; +} +.w-form-done { + display: none; + padding: 20px; + text-align: center; + background-color: #dddddd; +} +.w-form-fail { + display: none; + margin-top: 10px; + padding: 10px; + background-color: #ffdede; +} +label { + display: block; + margin-bottom: 5px; + font-weight: bold; +} +.w-input, +.w-select { + display: block; + width: 100%; + height: 38px; + padding: 8px 12px; + margin-bottom: 10px; + font-size: 14px; + line-height: 1.42857143; + color: #333333; + vertical-align: middle; + background-color: #ffffff; + border: 1px solid #cccccc; +} +.w-input:-moz-placeholder, +.w-select:-moz-placeholder { + color: #999; +} +.w-input::-moz-placeholder, +.w-select::-moz-placeholder { + color: #999; + opacity: 1; +} +.w-input:-ms-input-placeholder, +.w-select:-ms-input-placeholder { + color: #999; +} +.w-input::-webkit-input-placeholder, +.w-select::-webkit-input-placeholder { + color: #999; +} +.w-input:focus, +.w-select:focus { + border-color: #3898ec; + outline: 0; +} +.w-input[disabled], +.w-select[disabled], +.w-input[readonly], +.w-select[readonly], +fieldset[disabled] .w-input, +fieldset[disabled] .w-select { + cursor: not-allowed; + background-color: #eeeeee; +} +textarea.w-input, +textarea.w-select { + height: auto; +} +.w-select { + background-color: #f3f3f3; +} +.w-select[multiple] { + height: auto; +} +.w-form-label { + display: inline-block; + cursor: pointer; + font-weight: normal; + margin-bottom: 0; +} +.w-radio { + display: block; + margin-bottom: 5px; + padding-left: 20px; +} +.w-radio:before, +.w-radio:after { + content: " "; + display: table; + grid-column-start: 1; + grid-row-start: 1; + grid-column-end: 2; + grid-row-end: 2; +} +.w-radio:after { + clear: both; +} +.w-radio-input { + margin: 4px 0 0; + margin-top: 1px \9; + line-height: normal; + float: left; + margin-left: -20px; +} +.w-radio-input { + margin-top: 3px; +} +.w-file-upload { + display: block; + margin-bottom: 10px; +} +.w-file-upload-input { + width: 0.1px; + height: 0.1px; + opacity: 0; + overflow: hidden; + position: absolute; + z-index: -100; +} +.w-file-upload-default, +.w-file-upload-uploading, +.w-file-upload-success { + display: inline-block; + color: #333333; +} +.w-file-upload-error { + display: block; + margin-top: 10px; +} +.w-file-upload-default.w-hidden, +.w-file-upload-uploading.w-hidden, +.w-file-upload-error.w-hidden, 
+.w-file-upload-success.w-hidden { + display: none; +} +.w-file-upload-uploading-btn { + display: flex; + font-size: 14px; + font-weight: normal; + cursor: pointer; + margin: 0; + padding: 8px 12px; + border: 1px solid #cccccc; + background-color: #fafafa; +} +.w-file-upload-file { + display: flex; + flex-grow: 1; + justify-content: space-between; + margin: 0; + padding: 8px 9px 8px 11px; + border: 1px solid #cccccc; + background-color: #fafafa; +} +.w-file-upload-file-name { + font-size: 14px; + font-weight: normal; + display: block; +} +.w-file-remove-link { + margin-top: 3px; + margin-left: 10px; + width: auto; + height: auto; + padding: 3px; + display: block; + cursor: pointer; +} +.w-icon-file-upload-remove { + margin: auto; + font-size: 10px; +} +.w-file-upload-error-msg { + display: inline-block; + color: #ea384c; + padding: 2px 0; +} +.w-file-upload-info { + display: inline-block; + line-height: 38px; + padding: 0 12px; +} +.w-file-upload-label { + display: inline-block; + font-size: 14px; + font-weight: normal; + cursor: pointer; + margin: 0; + padding: 8px 12px; + border: 1px solid #cccccc; + background-color: #fafafa; +} +.w-icon-file-upload-icon, +.w-icon-file-upload-uploading { + display: inline-block; + margin-right: 8px; + width: 20px; +} +.w-icon-file-upload-uploading { + height: 20px; +} +.w-container { + margin-left: auto; + margin-right: auto; + max-width: 940px; +} +.w-container:before, +.w-container:after { + content: " "; + display: table; + grid-column-start: 1; + grid-row-start: 1; + grid-column-end: 2; + grid-row-end: 2; +} +.w-container:after { + clear: both; +} +.w-container .w-row { + margin-left: -10px; + margin-right: -10px; +} +.w-row:before, +.w-row:after { + content: " "; + display: table; + grid-column-start: 1; + grid-row-start: 1; + grid-column-end: 2; + grid-row-end: 2; +} +.w-row:after { + clear: both; +} +.w-row .w-row { + margin-left: 0; + margin-right: 0; +} +.w-col { + position: relative; + float: left; + width: 100%; + min-height: 1px; + padding-left: 10px; + padding-right: 10px; +} +.w-col .w-col { + padding-left: 0; + padding-right: 0; +} +.w-col-1 { + width: 8.33333333%; +} +.w-col-2 { + width: 16.66666667%; +} +.w-col-3 { + width: 25%; +} +.w-col-4 { + width: 33.33333333%; +} +.w-col-5 { + width: 41.66666667%; +} +.w-col-6 { + width: 50%; +} +.w-col-7 { + width: 58.33333333%; +} +.w-col-8 { + width: 66.66666667%; +} +.w-col-9 { + width: 75%; +} +.w-col-10 { + width: 83.33333333%; +} +.w-col-11 { + width: 91.66666667%; +} +.w-col-12 { + width: 100%; +} +.w-hidden-main { + display: none !important; +} +@media screen and (max-width: 991px) { + .w-container { + max-width: 728px; + } + .w-hidden-main { + display: inherit !important; + } + .w-hidden-medium { + display: none !important; + } + .w-col-medium-1 { + width: 8.33333333%; + } + .w-col-medium-2 { + width: 16.66666667%; + } + .w-col-medium-3 { + width: 25%; + } + .w-col-medium-4 { + width: 33.33333333%; + } + .w-col-medium-5 { + width: 41.66666667%; + } + .w-col-medium-6 { + width: 50%; + } + .w-col-medium-7 { + width: 58.33333333%; + } + .w-col-medium-8 { + width: 66.66666667%; + } + .w-col-medium-9 { + width: 75%; + } + .w-col-medium-10 { + width: 83.33333333%; + } + .w-col-medium-11 { + width: 91.66666667%; + } + .w-col-medium-12 { + width: 100%; + } + .w-col-stack { + width: 100%; + left: auto; + right: auto; + } +} +@media screen and (max-width: 767px) { + .w-hidden-main { + display: inherit !important; + } + .w-hidden-medium { + display: inherit !important; + } + .w-hidden-small { + 
display: none !important; + } + .w-row, + .w-container .w-row { + margin-left: 0; + margin-right: 0; + } + .w-col { + width: 100%; + left: auto; + right: auto; + } + .w-col-small-1 { + width: 8.33333333%; + } + .w-col-small-2 { + width: 16.66666667%; + } + .w-col-small-3 { + width: 25%; + } + .w-col-small-4 { + width: 33.33333333%; + } + .w-col-small-5 { + width: 41.66666667%; + } + .w-col-small-6 { + width: 50%; + } + .w-col-small-7 { + width: 58.33333333%; + } + .w-col-small-8 { + width: 66.66666667%; + } + .w-col-small-9 { + width: 75%; + } + .w-col-small-10 { + width: 83.33333333%; + } + .w-col-small-11 { + width: 91.66666667%; + } + .w-col-small-12 { + width: 100%; + } +} +@media screen and (max-width: 479px) { + .w-container { + max-width: none; + } + .w-hidden-main { + display: inherit !important; + } + .w-hidden-medium { + display: inherit !important; + } + .w-hidden-small { + display: inherit !important; + } + .w-hidden-tiny { + display: none !important; + } + .w-col { + width: 100%; + } + .w-col-tiny-1 { + width: 8.33333333%; + } + .w-col-tiny-2 { + width: 16.66666667%; + } + .w-col-tiny-3 { + width: 25%; + } + .w-col-tiny-4 { + width: 33.33333333%; + } + .w-col-tiny-5 { + width: 41.66666667%; + } + .w-col-tiny-6 { + width: 50%; + } + .w-col-tiny-7 { + width: 58.33333333%; + } + .w-col-tiny-8 { + width: 66.66666667%; + } + .w-col-tiny-9 { + width: 75%; + } + .w-col-tiny-10 { + width: 83.33333333%; + } + .w-col-tiny-11 { + width: 91.66666667%; + } + .w-col-tiny-12 { + width: 100%; + } +} +.w-widget { + position: relative; +} +.w-widget-map { + width: 100%; + height: 400px; +} +.w-widget-map label { + width: auto; + display: inline; +} +.w-widget-map img { + max-width: inherit; +} +.w-widget-map .gm-style-iw { + text-align: center; +} +.w-widget-map .gm-style-iw > button { + display: none !important; +} +.w-widget-twitter { + overflow: hidden; +} +.w-widget-twitter-count-shim { + display: inline-block; + vertical-align: top; + position: relative; + width: 28px; + height: 20px; + text-align: center; + background: white; + border: #758696 solid 1px; + border-radius: 3px; +} +.w-widget-twitter-count-shim * { + pointer-events: none; + -webkit-user-select: none; + -moz-user-select: none; + -ms-user-select: none; + user-select: none; +} +.w-widget-twitter-count-shim .w-widget-twitter-count-inner { + position: relative; + font-size: 15px; + line-height: 12px; + text-align: center; + color: #999; + font-family: serif; +} +.w-widget-twitter-count-shim .w-widget-twitter-count-clear { + position: relative; + display: block; +} +.w-widget-twitter-count-shim.w--large { + width: 36px; + height: 28px; +} +.w-widget-twitter-count-shim.w--large .w-widget-twitter-count-inner { + font-size: 18px; + line-height: 18px; +} +.w-widget-twitter-count-shim:not(.w--vertical) { + margin-left: 5px; + margin-right: 8px; +} +.w-widget-twitter-count-shim:not(.w--vertical).w--large { + margin-left: 6px; +} +.w-widget-twitter-count-shim:not(.w--vertical):before, +.w-widget-twitter-count-shim:not(.w--vertical):after { + top: 50%; + left: 0; + border: solid transparent; + content: " "; + height: 0; + width: 0; + position: absolute; + pointer-events: none; +} +.w-widget-twitter-count-shim:not(.w--vertical):before { + border-color: rgba(117, 134, 150, 0); + border-right-color: #5d6c7b; + border-width: 4px; + margin-left: -9px; + margin-top: -4px; +} +.w-widget-twitter-count-shim:not(.w--vertical).w--large:before { + border-width: 5px; + margin-left: -10px; + margin-top: -5px; +} 
+.w-widget-twitter-count-shim:not(.w--vertical):after { + border-color: rgba(255, 255, 255, 0); + border-right-color: white; + border-width: 4px; + margin-left: -8px; + margin-top: -4px; +} +.w-widget-twitter-count-shim:not(.w--vertical).w--large:after { + border-width: 5px; + margin-left: -9px; + margin-top: -5px; +} +.w-widget-twitter-count-shim.w--vertical { + width: 61px; + height: 33px; + margin-bottom: 8px; +} +.w-widget-twitter-count-shim.w--vertical:before, +.w-widget-twitter-count-shim.w--vertical:after { + top: 100%; + left: 50%; + border: solid transparent; + content: " "; + height: 0; + width: 0; + position: absolute; + pointer-events: none; +} +.w-widget-twitter-count-shim.w--vertical:before { + border-color: rgba(117, 134, 150, 0); + border-top-color: #5d6c7b; + border-width: 5px; + margin-left: -5px; +} +.w-widget-twitter-count-shim.w--vertical:after { + border-color: rgba(255, 255, 255, 0); + border-top-color: white; + border-width: 4px; + margin-left: -4px; +} +.w-widget-twitter-count-shim.w--vertical .w-widget-twitter-count-inner { + font-size: 18px; + line-height: 22px; +} +.w-widget-twitter-count-shim.w--vertical.w--large { + width: 76px; +} +.w-background-video { + position: relative; + overflow: hidden; + height: 500px; + color: white; +} +.w-background-video > video { + background-size: cover; + background-position: 50% 50%; + position: absolute; + margin: auto; + width: 100%; + height: 100%; + right: -100%; + bottom: -100%; + top: -100%; + left: -100%; + object-fit: cover; + z-index: -100; +} +.w-background-video > video::-webkit-media-controls-start-playback-button { + display: none !important; + -webkit-appearance: none; +} +.w-slider { + position: relative; + height: 300px; + text-align: center; + background: #dddddd; + clear: both; + -webkit-tap-highlight-color: rgba(0, 0, 0, 0); + tap-highlight-color: rgba(0, 0, 0, 0); +} +.w-slider-mask { + position: relative; + display: block; + overflow: hidden; + z-index: 1; + left: 0; + right: 0; + height: 100%; + white-space: nowrap; +} +.w-slide { + position: relative; + display: inline-block; + vertical-align: top; + width: 100%; + height: 100%; + white-space: normal; + text-align: left; +} +.w-slider-nav { + position: absolute; + z-index: 2; + top: auto; + right: 0; + bottom: 0; + left: 0; + margin: auto; + padding-top: 10px; + height: 40px; + text-align: center; + -webkit-tap-highlight-color: rgba(0, 0, 0, 0); + tap-highlight-color: rgba(0, 0, 0, 0); +} +.w-slider-nav.w-round > div { + border-radius: 100%; +} +.w-slider-nav.w-num > div { + width: auto; + height: auto; + padding: 0.2em 0.5em; + font-size: inherit; + line-height: inherit; +} +.w-slider-nav.w-shadow > div { + box-shadow: 0 0 3px rgba(51, 51, 51, 0.4); +} +.w-slider-nav-invert { + color: #fff; +} +.w-slider-nav-invert > div { + background-color: rgba(34, 34, 34, 0.4); +} +.w-slider-nav-invert > div.w-active { + background-color: #222; +} +.w-slider-dot { + position: relative; + display: inline-block; + width: 1em; + height: 1em; + background-color: rgba(255, 255, 255, 0.4); + cursor: pointer; + margin: 0 3px 0.5em; + transition: background-color 100ms, color 100ms; +} +.w-slider-dot.w-active { + background-color: #fff; +} +.w-slider-dot:focus { + outline: none; + box-shadow: 0 0 0 2px #fff; +} +.w-slider-dot:focus.w-active { + box-shadow: none; +} +.w-slider-arrow-left, +.w-slider-arrow-right { + position: absolute; + width: 80px; + top: 0; + right: 0; + bottom: 0; + left: 0; + margin: auto; + cursor: pointer; + overflow: hidden; + color: white; + 
font-size: 40px; + -webkit-tap-highlight-color: rgba(0, 0, 0, 0); + tap-highlight-color: rgba(0, 0, 0, 0); + -webkit-user-select: none; + -moz-user-select: none; + -ms-user-select: none; + user-select: none; +} +.w-slider-arrow-left [class^="w-icon-"], +.w-slider-arrow-right [class^="w-icon-"], +.w-slider-arrow-left [class*=" w-icon-"], +.w-slider-arrow-right [class*=" w-icon-"] { + position: absolute; +} +.w-slider-arrow-left:focus, +.w-slider-arrow-right:focus { + outline: 0; +} +.w-slider-arrow-left { + z-index: 3; + right: auto; +} +.w-slider-arrow-right { + z-index: 4; + left: auto; +} +.w-icon-slider-left, +.w-icon-slider-right { + top: 0; + right: 0; + bottom: 0; + left: 0; + margin: auto; + width: 1em; + height: 1em; +} +.w-slider-aria-label { + border: 0; + clip: rect(0 0 0 0); + height: 1px; + margin: -1px; + overflow: hidden; + padding: 0; + position: absolute; + width: 1px; +} +.w-slider-force-show { + display: block !important; +} +.w-dropdown { + display: inline-block; + position: relative; + text-align: left; + margin-left: auto; + margin-right: auto; + z-index: 900; +} +.w-dropdown-btn, +.w-dropdown-toggle, +.w-dropdown-link { + position: relative; + vertical-align: top; + text-decoration: none; + color: #222222; + padding: 20px; + text-align: left; + margin-left: auto; + margin-right: auto; + white-space: nowrap; +} +.w-dropdown-toggle { + -webkit-user-select: none; + -moz-user-select: none; + -ms-user-select: none; + user-select: none; + display: inline-block; + cursor: pointer; + padding-right: 40px; +} +.w-dropdown-toggle:focus { + outline: 0; +} +.w-icon-dropdown-toggle { + position: absolute; + top: 0; + right: 0; + bottom: 0; + margin: auto; + margin-right: 20px; + width: 1em; + height: 1em; +} +.w-dropdown-list { + position: absolute; + background: #dddddd; + display: none; + min-width: 100%; +} +.w-dropdown-list.w--open { + display: block; +} +.w-dropdown-link { + padding: 10px 20px; + display: block; + color: #222222; +} +.w-dropdown-link.w--current { + color: #0082f3; +} +.w-dropdown-link:focus { + outline: 0; +} +@media screen and (max-width: 767px) { + .w-nav-brand { + padding-left: 10px; + } +} +.w-lightbox-backdrop { + color: #000; + cursor: auto; + font-family: serif; + font-size: medium; + font-style: normal; + font-variant: normal; + font-weight: normal; + letter-spacing: normal; + line-height: normal; + list-style: disc; + text-align: start; + text-indent: 0; + text-shadow: none; + text-transform: none; + visibility: visible; + white-space: normal; + word-break: normal; + word-spacing: normal; + word-wrap: normal; + position: fixed; + top: 0; + right: 0; + bottom: 0; + left: 0; + color: #fff; + font-family: "Helvetica Neue", Helvetica, Ubuntu, "Segoe UI", Verdana, + sans-serif; + font-size: 17px; + line-height: 1.2; + font-weight: 300; + text-align: center; + background: rgba(0, 0, 0, 0.9); + z-index: 2000; + outline: 0; + opacity: 0; + -webkit-user-select: none; + -moz-user-select: none; + -ms-user-select: none; + -webkit-tap-highlight-color: transparent; + -webkit-transform: translate(0, 0); +} +.w-lightbox-backdrop, +.w-lightbox-container { + height: 100%; + overflow: auto; + -webkit-overflow-scrolling: touch; +} +.w-lightbox-content { + position: relative; + height: 100vh; + overflow: hidden; +} +.w-lightbox-view { + position: absolute; + width: 100vw; + height: 100vh; + opacity: 0; +} +.w-lightbox-view:before { + content: ""; + height: 100vh; +} +.w-lightbox-group, +.w-lightbox-group .w-lightbox-view, +.w-lightbox-group .w-lightbox-view:before { + 
height: 86vh; +} +.w-lightbox-frame, +.w-lightbox-view:before { + display: inline-block; + vertical-align: middle; +} +.w-lightbox-figure { + position: relative; + margin: 0; +} +.w-lightbox-group .w-lightbox-figure { + cursor: pointer; +} +.w-lightbox-img { + width: auto; + height: auto; + max-width: none; +} +.w-lightbox-image { + display: block; + float: none; + max-width: 100vw; + max-height: 100vh; +} +.w-lightbox-group .w-lightbox-image { + max-height: 86vh; +} +.w-lightbox-caption { + position: absolute; + right: 0; + bottom: 0; + left: 0; + padding: 0.5em 1em; + background: rgba(0, 0, 0, 0.4); + text-align: left; + text-overflow: ellipsis; + white-space: nowrap; + overflow: hidden; +} +.w-lightbox-embed { + position: absolute; + top: 0; + right: 0; + bottom: 0; + left: 0; + width: 100%; + height: 100%; +} +.w-lightbox-control { + position: absolute; + top: 0; + width: 4em; + background-size: 24px; + background-repeat: no-repeat; + background-position: center; + cursor: pointer; + -webkit-transition: all 0.3s; + transition: all 0.3s; +} +.w-lightbox-left { + display: none; + bottom: 0; + left: 0; + background-image: url("data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHZpZXdCb3g9Ii0yMCAwIDI0IDQwIiB3aWR0aD0iMjQiIGhlaWdodD0iNDAiPjxnIHRyYW5zZm9ybT0icm90YXRlKDQ1KSI+PHBhdGggZD0ibTAgMGg1djIzaDIzdjVoLTI4eiIgb3BhY2l0eT0iLjQiLz48cGF0aCBkPSJtMSAxaDN2MjNoMjN2M2gtMjZ6IiBmaWxsPSIjZmZmIi8+PC9nPjwvc3ZnPg=="); +} +.w-lightbox-right { + display: none; + right: 0; + bottom: 0; + background-image: url("data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHZpZXdCb3g9Ii00IDAgMjQgNDAiIHdpZHRoPSIyNCIgaGVpZ2h0PSI0MCI+PGcgdHJhbnNmb3JtPSJyb3RhdGUoNDUpIj48cGF0aCBkPSJtMC0waDI4djI4aC01di0yM2gtMjN6IiBvcGFjaXR5PSIuNCIvPjxwYXRoIGQ9Im0xIDFoMjZ2MjZoLTN2LTIzaC0yM3oiIGZpbGw9IiNmZmYiLz48L2c+PC9zdmc+"); +} +.w-lightbox-close { + right: 0; + height: 2.6em; + background-image: url("data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHZpZXdCb3g9Ii00IDAgMTggMTciIHdpZHRoPSIxOCIgaGVpZ2h0PSIxNyI+PGcgdHJhbnNmb3JtPSJyb3RhdGUoNDUpIj48cGF0aCBkPSJtMCAwaDd2LTdoNXY3aDd2NWgtN3Y3aC01di03aC03eiIgb3BhY2l0eT0iLjQiLz48cGF0aCBkPSJtMSAxaDd2LTdoM3Y3aDd2M2gtN3Y3aC0zdi03aC03eiIgZmlsbD0iI2ZmZiIvPjwvZz48L3N2Zz4="); + background-size: 18px; +} +.w-lightbox-strip { + position: absolute; + bottom: 0; + left: 0; + right: 0; + padding: 0 1vh; + line-height: 0; + white-space: nowrap; + overflow-x: auto; + overflow-y: hidden; +} +.w-lightbox-item { + display: inline-block; + width: 10vh; + padding: 2vh 1vh; + box-sizing: content-box; + cursor: pointer; + -webkit-transform: translate3d(0, 0, 0); +} +.w-lightbox-active { + opacity: 0.3; +} +.w-lightbox-thumbnail { + position: relative; + height: 10vh; + background: #222; + overflow: hidden; +} +.w-lightbox-thumbnail-image { + position: absolute; + top: 0; + left: 0; +} +.w-lightbox-thumbnail .w-lightbox-tall { + top: 50%; + width: 100%; + -webkit-transform: translate(0, -50%); + -ms-transform: translate(0, -50%); + transform: translate(0, -50%); +} +.w-lightbox-thumbnail .w-lightbox-wide { + left: 50%; + height: 100%; + -webkit-transform: translate(-50%, 0); + -ms-transform: translate(-50%, 0); + transform: translate(-50%, 0); +} +.w-lightbox-spinner { + position: absolute; + top: 50%; + left: 50%; + box-sizing: border-box; + width: 40px; + height: 40px; + margin-top: -20px; + margin-left: -20px; + border: 5px solid rgba(0, 0, 0, 0.4); + border-radius: 50%; + -webkit-animation: spin 0.8s infinite linear; + animation: spin 
0.8s infinite linear; +} +.w-lightbox-spinner:after { + content: ""; + position: absolute; + top: -4px; + right: -4px; + bottom: -4px; + left: -4px; + border: 3px solid transparent; + border-bottom-color: #fff; + border-radius: 50%; +} +.w-lightbox-hide { + display: none; +} +.w-lightbox-noscroll { + overflow: hidden; +} +@media (min-width: 768px) { + .w-lightbox-content { + height: 96vh; + margin-top: 2vh; + } + .w-lightbox-view, + .w-lightbox-view:before { + height: 96vh; + } + .w-lightbox-group, + .w-lightbox-group .w-lightbox-view, + .w-lightbox-group .w-lightbox-view:before { + height: 84vh; + } + .w-lightbox-image { + max-width: 96vw; + max-height: 96vh; + } + .w-lightbox-group .w-lightbox-image { + max-width: 82.3vw; + max-height: 84vh; + } + .w-lightbox-left, + .w-lightbox-right { + display: block; + opacity: 0.5; + } + .w-lightbox-close { + opacity: 0.8; + } + .w-lightbox-control:hover { + opacity: 1; + } +} +.w-lightbox-inactive, +.w-lightbox-inactive:hover { + opacity: 0; +} +.w-richtext:before, +.w-richtext:after { + content: " "; + display: table; + grid-column-start: 1; + grid-row-start: 1; + grid-column-end: 2; + grid-row-end: 2; +} +.w-richtext:after { + clear: both; +} +.w-richtext[contenteditable="true"]:before, +.w-richtext[contenteditable="true"]:after { + white-space: initial; +} +.w-richtext ol, +.w-richtext ul { + overflow: hidden; +} +.w-richtext .w-richtext-figure-selected.w-richtext-figure-type-video div:after, +.w-richtext .w-richtext-figure-selected[data-rt-type="video"] div:after { + outline: 2px solid #2895f7; +} +.w-richtext .w-richtext-figure-selected.w-richtext-figure-type-image div, +.w-richtext .w-richtext-figure-selected[data-rt-type="image"] div { + outline: 2px solid #2895f7; +} +.w-richtext figure.w-richtext-figure-type-video > div:after, +.w-richtext figure[data-rt-type="video"] > div:after { + content: ""; + position: absolute; + display: none; + left: 0; + top: 0; + right: 0; + bottom: 0; +} +.w-richtext figure { + position: relative; + max-width: 60%; +} +.w-richtext figure > div:before { + cursor: default !important; +} +.w-richtext figure img { + width: 100%; +} +.w-richtext figure figcaption.w-richtext-figcaption-placeholder { + opacity: 0.6; +} +.w-richtext figure div { + font-size: 0; + color: transparent; +} +.w-richtext figure.w-richtext-figure-type-image, +.w-richtext figure[data-rt-type="image"] { + display: table; +} +.w-richtext figure.w-richtext-figure-type-image > div, +.w-richtext figure[data-rt-type="image"] > div { + display: inline-block; +} +.w-richtext figure.w-richtext-figure-type-image > figcaption, +.w-richtext figure[data-rt-type="image"] > figcaption { + display: table-caption; + caption-side: bottom; +} +.w-richtext figure.w-richtext-figure-type-video, +.w-richtext figure[data-rt-type="video"] { + width: 60%; + height: 0; +} +.w-richtext figure.w-richtext-figure-type-video iframe, +.w-richtext figure[data-rt-type="video"] iframe { + position: absolute; + top: 0; + left: 0; + width: 100%; + height: 100%; +} +.w-richtext figure.w-richtext-figure-type-video > div, +.w-richtext figure[data-rt-type="video"] > div { + width: 100%; +} +.w-richtext figure.w-richtext-align-center { + margin-right: auto; + margin-left: auto; + clear: both; +} +.w-richtext figure.w-richtext-align-center.w-richtext-figure-type-image > div, +.w-richtext figure.w-richtext-align-center[data-rt-type="image"] > div { + max-width: 100%; +} +.w-richtext figure.w-richtext-align-normal { + clear: both; +} +.w-richtext figure.w-richtext-align-fullwidth { + 
width: 100%; + max-width: 100%; + text-align: center; + clear: both; + display: block; + margin-right: auto; + margin-left: auto; +} +.w-richtext figure.w-richtext-align-fullwidth > div { + display: inline-block; + padding-bottom: inherit; +} +.w-richtext figure.w-richtext-align-fullwidth > figcaption { + display: block; +} +.w-richtext figure.w-richtext-align-floatleft { + float: left; + margin-right: 15px; + clear: none; +} +.w-richtext figure.w-richtext-align-floatright { + float: right; + margin-left: 15px; + clear: none; +} +.w-nav { + position: relative; + background: #dddddd; + z-index: 1000; +} +.w-nav:before, +.w-nav:after { + content: " "; + display: table; + grid-column-start: 1; + grid-row-start: 1; + grid-column-end: 2; + grid-row-end: 2; +} +.w-nav:after { + clear: both; +} +.w-nav-brand { + position: relative; + float: left; + text-decoration: none; + color: #333333; +} +.w-nav-link { + position: relative; + display: inline-block; + vertical-align: top; + text-decoration: none; + color: #222222; + padding: 20px; + text-align: left; + margin-left: auto; + margin-right: auto; +} +.w-nav-link.w--current { + color: #0082f3; +} +.w-nav-menu { + position: relative; + float: right; +} +[data-nav-menu-open] { + display: block !important; + position: absolute; + top: 100%; + left: 0; + right: 0; + background: #c8c8c8; + text-align: center; + overflow: visible; + min-width: 200px; +} +.w--nav-link-open { + display: block; + position: relative; +} +.w-nav-overlay { + position: absolute; + overflow: hidden; + display: none; + top: 100%; + left: 0; + right: 0; + width: 100%; +} +.w-nav-overlay [data-nav-menu-open] { + top: 0; +} +.w-nav[data-animation="over-left"] .w-nav-overlay { + width: auto; +} +.w-nav[data-animation="over-left"] .w-nav-overlay, +.w-nav[data-animation="over-left"] [data-nav-menu-open] { + right: auto; + z-index: 1; + top: 0; +} +.w-nav[data-animation="over-right"] .w-nav-overlay { + width: auto; +} +.w-nav[data-animation="over-right"] .w-nav-overlay, +.w-nav[data-animation="over-right"] [data-nav-menu-open] { + left: auto; + z-index: 1; + top: 0; +} +.w-nav-button { + position: relative; + float: right; + padding: 18px; + font-size: 24px; + display: none; + cursor: pointer; + -webkit-tap-highlight-color: rgba(0, 0, 0, 0); + tap-highlight-color: rgba(0, 0, 0, 0); + -webkit-user-select: none; + -moz-user-select: none; + -ms-user-select: none; + user-select: none; +} +.w-nav-button:focus { + outline: 0; +} +.w-nav-button.w--open { + background-color: #c8c8c8; + color: white; +} +.w-nav[data-collapse="all"] .w-nav-menu { + display: none; +} +.w-nav[data-collapse="all"] .w-nav-button { + display: block; +} +.w--nav-dropdown-open { + display: block; +} +.w--nav-dropdown-toggle-open { + display: block; +} +.w--nav-dropdown-list-open { + position: static; +} +@media screen and (max-width: 991px) { + .w-nav[data-collapse="medium"] .w-nav-menu { + display: none; + } + .w-nav[data-collapse="medium"] .w-nav-button { + display: block; + } +} +@media screen and (max-width: 767px) { + .w-nav[data-collapse="small"] .w-nav-menu { + display: none; + } + .w-nav[data-collapse="small"] .w-nav-button { + display: block; + } + .w-nav-brand { + padding-left: 10px; + } +} +@media screen and (max-width: 479px) { + .w-nav[data-collapse="tiny"] .w-nav-menu { + display: none; + } + .w-nav[data-collapse="tiny"] .w-nav-button { + display: block; + } +} +.w-tabs { + position: relative; +} +.w-tabs:before, +.w-tabs:after { + content: " "; + display: table; + grid-column-start: 1; + grid-row-start: 
1; + grid-column-end: 2; + grid-row-end: 2; +} +.w-tabs:after { + clear: both; +} +.w-tab-menu { + position: relative; +} +.w-tab-link { + position: relative; + display: inline-block; + vertical-align: top; + text-decoration: none; + padding: 9px 30px; + text-align: left; + cursor: pointer; + color: #222222; + background-color: #dddddd; +} +.w-tab-link.w--current { + background-color: #c8c8c8; +} +.w-tab-link:focus { + outline: 0; +} +.w-tab-content { + position: relative; + display: block; + overflow: hidden; +} +.w-tab-pane { + position: relative; + display: none; +} +.w--tab-active { + display: block; +} +@media screen and (max-width: 479px) { + .w-tab-link { + display: block; + } +} +.w-ix-emptyfix:after { + content: ""; +} +@keyframes spin { + 0% { + transform: rotate(0deg); + } + 100% { + transform: rotate(360deg); + } +} +.w-dyn-empty { + padding: 10px; + background-color: #dddddd; +} +.w-dyn-hide { + display: none !important; +} +.w-dyn-bind-empty { + display: none !important; +} +.w-condition-invisible { + display: none !important; +} + +/* ========================================================================== + Start of custom Webflow CSS + ========================================================================== */ +a { + color: #000; + text-decoration: none; +} + +.utility-page-wrap { + display: -webkit-box; + display: -webkit-flex; + display: -ms-flexbox; + display: flex; + width: 100vw; + height: 100vh; + max-height: 100%; + max-width: 100%; + -webkit-box-pack: center; + -webkit-justify-content: center; + -ms-flex-pack: center; + justify-content: center; + -webkit-box-align: center; + -webkit-align-items: center; + -ms-flex-align: center; + align-items: center; +} + +.utility-page-content { + display: -webkit-box; + display: -webkit-flex; + display: -ms-flexbox; + display: flex; + width: 260px; + -webkit-box-orient: vertical; + -webkit-box-direction: normal; + -webkit-flex-direction: column; + -ms-flex-direction: column; + flex-direction: column; + text-align: center; +} + +.utility-page-form { + display: -webkit-box; + display: -webkit-flex; + display: -ms-flexbox; + display: flex; + -webkit-box-orient: vertical; + -webkit-box-direction: normal; + -webkit-flex-direction: column; + -ms-flex-direction: column; + flex-direction: column; + -webkit-box-align: stretch; + -webkit-align-items: stretch; + -ms-flex-align: stretch; + align-items: stretch; +} + +.image-3 { + border: 2px solid #000; + border-radius: 20px; + box-shadow: 1px 1px 3px 0 #000; +} + +.accordion-item-trigger { + display: -webkit-box; + display: -webkit-flex; + display: -ms-flexbox; + display: flex; + -webkit-box-pack: justify; + -webkit-justify-content: space-between; + -ms-flex-pack: justify; + justify-content: space-between; + -webkit-box-align: start; + -webkit-align-items: flex-start; + -ms-flex-align: start; + align-items: flex-start; +} + +.section-2 { + display: -webkit-box; + display: -webkit-flex; + display: -ms-flexbox; + display: flex; + width: 100vw; + height: 100vh; + -webkit-box-pack: center; + -webkit-justify-content: center; + -ms-flex-pack: center; + justify-content: center; + -webkit-box-align: center; + -webkit-align-items: center; + -ms-flex-align: center; + align-items: center; +} + +.heading-13 { + color: #0300b1; +} + +.section-3 { + display: -webkit-box; + display: -webkit-flex; + display: -ms-flexbox; + display: flex; + padding: 20px; + -webkit-box-orient: horizontal; + -webkit-box-direction: normal; + -webkit-flex-direction: row; + -ms-flex-direction: row; + flex-direction: row; + 
-webkit-box-pack: center; + -webkit-justify-content: center; + -ms-flex-pack: center; + justify-content: center; + -webkit-box-align: center; + -webkit-align-items: center; + -ms-flex-align: center; + align-items: center; +} + +.div-block { + width: 50vw; + min-height: 100vh; + padding-right: 2rem; + padding-left: 2rem; + -webkit-align-self: flex-start; + -ms-flex-item-align: start; + align-self: flex-start; +} + +.div-block.div-block-12 { + -webkit-align-self: flex-start; + -ms-flex-item-align: start; + align-self: flex-start; +} + +.heading-14 { + margin-bottom: 40px; + text-align: center; +} + +.heading-14.heading-16 { + margin-bottom: 60px; +} + +.div-block-2 { + min-height: 50vh; + border: 3px solid #000; + border-radius: 25px; +} + +.heading-15 { + text-align: center; +} + +.div-block-3 { + min-height: 30vh; +} + +.div-block-4 { + display: -ms-grid; + display: grid; + padding-top: 20px; + padding-bottom: 20px; + -webkit-box-pack: center; + -webkit-justify-content: center; + -ms-flex-pack: center; + justify-content: center; + grid-auto-columns: 1fr; + grid-column-gap: 16px; + grid-row-gap: 16px; + -ms-grid-columns: 1fr 1fr 1fr; + grid-template-columns: 1fr 1fr 1fr; + -ms-grid-rows: auto; + grid-template-rows: auto; +} + +.button { + border-radius: 10px; + background-color: #7db56f; + color: #000; + text-align: center; +} + +.button-2 { + border-radius: 10px; + background-color: #cece3d; + color: #000; + text-align: center; +} + +.button-3 { + border-radius: 10px; + background-color: #ec383b; + color: #000; + text-align: center; +} + +.button-4 { + border-radius: 10px; + background-color: #b9b9b9; + color: #000; + text-align: center; +} + +.div-block-5 { + display: -ms-grid; + display: grid; + padding-top: 20px; + padding-bottom: 20px; + -webkit-box-pack: center; + -webkit-justify-content: center; + -ms-flex-pack: center; + justify-content: center; + -webkit-box-align: center; + -webkit-align-items: center; + -ms-flex-align: center; + align-items: center; + grid-auto-columns: 1fr; + grid-column-gap: 16px; + grid-row-gap: 16px; + -ms-grid-columns: 1fr; + grid-template-columns: 1fr; + -ms-grid-rows: auto; + grid-template-rows: auto; +} + +.div-block-6 { + display: -webkit-box; + display: -webkit-flex; + display: -ms-flexbox; + display: flex; + -webkit-justify-content: space-around; + -ms-flex-pack: distribute; + justify-content: space-around; + justify-items: center; + grid-auto-columns: 1fr; + -ms-grid-columns: 1fr; + grid-template-columns: 1fr; + -ms-grid-rows: auto; + grid-template-rows: auto; +} + +.div-block-7 { + width: 60%; +} + +.text-block { + color: #000; +} + +.div-block-9 { + margin-top: 40px; + margin-bottom: 40px; + border-radius: 5px; +} + +.div-block-10 { + display: -ms-grid; + display: grid; + margin-top: 20px; + margin-bottom: 40px; + -webkit-box-pack: justify; + -webkit-justify-content: space-between; + -ms-flex-pack: justify; + justify-content: space-between; + grid-auto-columns: 1fr; + grid-column-gap: 16px; + grid-row-gap: 16px; + -ms-grid-columns: 1fr; + grid-template-columns: 1fr; + -ms-grid-rows: auto; + grid-template-rows: auto; +} + +.div-block-11 { + min-height: 20vh; +} + +.text-block-2 { + padding-top: 20px; + text-align: justify; +} + + +@media screen and (min-width: 1440px) { + .heading-14 { + margin-bottom: 40px; + } + + .div-block-11 { + min-height: 20vh; + } +} + +@media screen and (max-width: 991px) { + .section-3 { + padding-right: 0px; + padding-left: 0px; + -webkit-box-orient: vertical; + -webkit-box-direction: normal; + -webkit-flex-direction: 
column; + -ms-flex-direction: column; + flex-direction: column; + } + + .div-block { + width: 100vw; + } + + .div-block.div-block-12 { + min-height: auto; + -webkit-box-ordinal-group: 0; + -webkit-order: -1; + -ms-flex-order: -1; + order: -1; + } + + .div-block-11 { + min-height: auto; + } + + + + .div-block-14 { + display: block; + padding-top: 20px; + padding-bottom: 20px; + } + + .div-block-15 { + padding-top: 20px; + padding-bottom: 20px; + } +} + +@media screen and (max-width: 767px) { + .div-block-2 { + min-height: 35vh; + } +} + +@media screen and (max-width: 479px) { + .section-3 { + padding-right: 0px; + padding-left: 0px; + } + + .div-block { + padding-right: 1rem; + padding-left: 1rem; + } + + .heading-14 { + margin-bottom: 40px; + } + + .div-block-2 { + min-height: 20vh; + } + + .heading-15 { + margin-bottom: 40px; + } + + .div-block-15 { + padding-top: 0px; + padding-bottom: 0px; + } +} + +#w-node-_4d7ead06-e94c-11f3-f394-98f06c51ba1d-74d2aed9 { + -ms-grid-column: span 1; + grid-column-start: span 1; + -ms-grid-column-span: 1; + grid-column-end: span 1; + -ms-grid-row: span 1; + grid-row-start: span 1; + -ms-grid-row-span: 1; + grid-row-end: span 1; +} + +#w-node-_8c67cacc-8dbe-dd48-7518-a75261770221-74d2aed9 { + -ms-grid-column: span 1; + grid-column-start: span 1; + -ms-grid-column-span: 1; + grid-column-end: span 1; + -ms-grid-row: span 1; + grid-row-start: span 1; + -ms-grid-row-span: 1; + grid-row-end: span 1; +} + +#w-node-b6afa4b8-5d2d-f2ea-ef09-eddce96f5dc0-74d2aed9 { + -ms-grid-column: span 1; + grid-column-start: span 1; + -ms-grid-column-span: 1; + grid-column-end: span 1; + -ms-grid-row: span 1; + grid-row-start: span 1; + -ms-grid-row-span: 1; + grid-row-end: span 1; +} diff --git a/static/styles.css b/static/styles.css new file mode 100644 index 0000000000000000000000000000000000000000..180fde3b7ec021f81848569427718f586610d706 --- /dev/null +++ b/static/styles.css @@ -0,0 +1,48 @@ +/* latin-ext */ +@font-face { + font-family: 'Oxanium'; + font-style: normal; + font-weight: 400; + src: url(https://fonts.gstatic.com/s/oxanium/v12/RrQQboN_4yJ0JmiMe2zE0ZJCZ4cQGg.woff2) format('woff2'); + unicode-range: U+0100-024F, U+0259, U+1E00-1EFF, U+2020, U+20A0-20AB, U+20AD-20CF, U+2113, U+2C60-2C7F, U+A720-A7FF; +} +/* latin */ +@font-face { + font-family: 'Oxanium'; + font-style: normal; + font-weight: 400; + src: url(https://fonts.gstatic.com/s/oxanium/v12/RrQQboN_4yJ0JmiMe2LE0ZJCZ4c.woff2) format('woff2'); + unicode-range: U+0000-00FF, U+0131, U+0152-0153, U+02BB-02BC, U+02C6, U+02DA, U+02DC, U+2000-206F, U+2074, U+20AC, U+2122, U+2191, U+2193, U+2212, U+2215, U+FEFF, U+FFFD; +} +/* latin-ext */ +@font-face { + font-family: 'Oxanium'; + font-style: normal; + font-weight: 500; + src: url(https://fonts.gstatic.com/s/oxanium/v12/RrQQboN_4yJ0JmiMe2zE0ZJCZ4cQGg.woff2) format('woff2'); + unicode-range: U+0100-024F, U+0259, U+1E00-1EFF, U+2020, U+20A0-20AB, U+20AD-20CF, U+2113, U+2C60-2C7F, U+A720-A7FF; +} +/* latin */ +@font-face { + font-family: 'Oxanium'; + font-style: normal; + font-weight: 500; + src: url(https://fonts.gstatic.com/s/oxanium/v12/RrQQboN_4yJ0JmiMe2LE0ZJCZ4c.woff2) format('woff2'); + unicode-range: U+0000-00FF, U+0131, U+0152-0153, U+02BB-02BC, U+02C6, U+02DA, U+02DC, U+2000-206F, U+2074, U+20AC, U+2122, U+2191, U+2193, U+2212, U+2215, U+FEFF, U+FFFD; +} +/* latin-ext */ +@font-face { + font-family: 'Oxanium'; + font-style: normal; + font-weight: 600; + src: url(https://fonts.gstatic.com/s/oxanium/v12/RrQQboN_4yJ0JmiMe2zE0ZJCZ4cQGg.woff2) format('woff2'); + 
unicode-range: U+0100-024F, U+0259, U+1E00-1EFF, U+2020, U+20A0-20AB, U+20AD-20CF, U+2113, U+2C60-2C7F, U+A720-A7FF; +} +/* latin */ +@font-face { + font-family: 'Oxanium'; + font-style: normal; + font-weight: 600; + src: url(https://fonts.gstatic.com/s/oxanium/v12/RrQQboN_4yJ0JmiMe2LE0ZJCZ4c.woff2) format('woff2'); + unicode-range: U+0000-00FF, U+0131, U+0152-0153, U+02BB-02BC, U+02C6, U+02DA, U+02DC, U+2000-206F, U+2074, U+20AC, U+2122, U+2191, U+2193, U+2212, U+2215, U+FEFF, U+FFFD; +} diff --git a/temp_img/9688418.jpg b/temp_img/9688418.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fb7d08ab042dfa8436063831c663e2dde36a642e Binary files /dev/null and b/temp_img/9688418.jpg differ diff --git a/templates/audio/A_female.mp3 b/templates/audio/A_female.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..404761b013deff86bb9bff0cf458c0eb03941d63 Binary files /dev/null and b/templates/audio/A_female.mp3 differ diff --git a/templates/audio/A_male.mp3 b/templates/audio/A_male.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..f85efd09dbc6171e3a35b6b3014344b9efb89b46 Binary files /dev/null and b/templates/audio/A_male.mp3 differ diff --git a/templates/audio/B_female.mp3 b/templates/audio/B_female.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..7af11c6e879a542c72b3f90caa5b1f94aa933f62 Binary files /dev/null and b/templates/audio/B_female.mp3 differ diff --git a/templates/audio/B_male.mp3 b/templates/audio/B_male.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..821e377b8def050aba56e47cf41714bb6bea7847 Binary files /dev/null and b/templates/audio/B_male.mp3 differ diff --git a/templates/audio/C_female.mp3 b/templates/audio/C_female.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..fc28ecfd0e03b4e6ade6596df7ca064b9a939210 Binary files /dev/null and b/templates/audio/C_female.mp3 differ diff --git a/templates/audio/C_male.mp3 b/templates/audio/C_male.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..20e501f564e97f053dc0b866394bcd04df383544 Binary files /dev/null and b/templates/audio/C_male.mp3 differ diff --git a/templates/audio/D_female.mp3 b/templates/audio/D_female.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..ef0df6ae1b169ef6b8026705bc0a48bb2a3cdb0c Binary files /dev/null and b/templates/audio/D_female.mp3 differ diff --git a/templates/audio/D_male.mp3 b/templates/audio/D_male.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..906370729ed08b3201cc246189109001d617515b Binary files /dev/null and b/templates/audio/D_male.mp3 differ diff --git a/templates/audio/E_female.mp3 b/templates/audio/E_female.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..41b33fc2198b9e411b71eb9c80d156d641b6978a Binary files /dev/null and b/templates/audio/E_female.mp3 differ diff --git a/templates/audio/E_male.mp3 b/templates/audio/E_male.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..7fe046a48a401ee2ba1ea854b3e705aaca803d9b Binary files /dev/null and b/templates/audio/E_male.mp3 differ diff --git a/templates/audio/F_female.mp3 b/templates/audio/F_female.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..7244852b4b3430c229fb997180f95cd27cf166c6 Binary files /dev/null and b/templates/audio/F_female.mp3 differ diff --git a/templates/audio/F_male.mp3 b/templates/audio/F_male.mp3 new file mode 100644 index 
0000000000000000000000000000000000000000..42684bc4e73ab74bbdbf4d4a6e9a2c3ef0dffe6a Binary files /dev/null and b/templates/audio/F_male.mp3 differ diff --git a/templates/audio/G_female.mp3 b/templates/audio/G_female.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..76ade5eb2af64c814d5969f2ed19f46559700ecb Binary files /dev/null and b/templates/audio/G_female.mp3 differ diff --git a/templates/audio/G_male.mp3 b/templates/audio/G_male.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..55770dfeef732cf5ada9393be50747adb3c62285 Binary files /dev/null and b/templates/audio/G_male.mp3 differ diff --git a/templates/audio/H_female.mp3 b/templates/audio/H_female.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..f6def8a1812d4084959566d55dbad3bb27f7283f Binary files /dev/null and b/templates/audio/H_female.mp3 differ diff --git a/templates/audio/H_male.mp3 b/templates/audio/H_male.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..72d8dcee2c682e681ba364b026c3487abbea69cb Binary files /dev/null and b/templates/audio/H_male.mp3 differ diff --git a/templates/audio/I_female.mp3 b/templates/audio/I_female.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..8e1b5ec63a2b26dc8735efe1a3b932fb5ec7e27c Binary files /dev/null and b/templates/audio/I_female.mp3 differ diff --git a/templates/audio/I_male.mp3 b/templates/audio/I_male.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..64e1097de580c29cf92c6bb4fb94d796c8d24603 Binary files /dev/null and b/templates/audio/I_male.mp3 differ diff --git a/templates/audio/J_female.mp3 b/templates/audio/J_female.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..7e55b0dfa4e503f519241f41bacacfa09475df37 Binary files /dev/null and b/templates/audio/J_female.mp3 differ diff --git a/templates/audio/J_male.mp3 b/templates/audio/J_male.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..0dd7fe4d3943a7bf7e3445813c79df3862b4bdb7 Binary files /dev/null and b/templates/audio/J_male.mp3 differ diff --git a/templates/audio/K_female.mp3 b/templates/audio/K_female.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..db9115d82d5716d008645112bfb7fbab586a2aed Binary files /dev/null and b/templates/audio/K_female.mp3 differ diff --git a/templates/audio/K_male.mp3 b/templates/audio/K_male.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..d6062bda7803bb261017623fefac62db6b7fa5df Binary files /dev/null and b/templates/audio/K_male.mp3 differ diff --git a/templates/audio/L_female.mp3 b/templates/audio/L_female.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..7b962861c9c050e8763489f388cb78a776b9a0aa Binary files /dev/null and b/templates/audio/L_female.mp3 differ diff --git a/templates/audio/L_male.mp3 b/templates/audio/L_male.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..ca1e4e2ad979e81f77d23c7ef4d83f5b1f2552ab Binary files /dev/null and b/templates/audio/L_male.mp3 differ diff --git a/templates/audio/M_female.mp3 b/templates/audio/M_female.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..dabc81d1e835a813497250a388efa9d4f570cb5c Binary files /dev/null and b/templates/audio/M_female.mp3 differ diff --git a/templates/audio/M_male.mp3 b/templates/audio/M_male.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..0d2f3dc83ed5fda2aef6bc5e94edca4f0aa7ed8b Binary files /dev/null and 
b/templates/audio/M_male.mp3 differ diff --git a/templates/audio/N_female.mp3 b/templates/audio/N_female.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..0856f8f54eef5d2f98330f5b9c80ad56e7a7a8d7 Binary files /dev/null and b/templates/audio/N_female.mp3 differ diff --git a/templates/audio/N_male.mp3 b/templates/audio/N_male.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..c440201ed03ee090f83d33bd871406196a575275 Binary files /dev/null and b/templates/audio/N_male.mp3 differ diff --git a/templates/audio/O_female.mp3 b/templates/audio/O_female.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..0f78a2cbf95512fb1c2337bdc7bb68599ff324f3 Binary files /dev/null and b/templates/audio/O_female.mp3 differ diff --git a/templates/audio/O_male.mp3 b/templates/audio/O_male.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..ab511026ef06c6a68459bb9dbf48bc2ee8845776 Binary files /dev/null and b/templates/audio/O_male.mp3 differ diff --git a/templates/audio/P_female.mp3 b/templates/audio/P_female.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..88d55f93ddb414e93563adc21f70e3be98014ded Binary files /dev/null and b/templates/audio/P_female.mp3 differ diff --git a/templates/audio/P_male.mp3 b/templates/audio/P_male.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..d2545f7ce429bc77456f00e923b73ff1d27c02c1 Binary files /dev/null and b/templates/audio/P_male.mp3 differ diff --git a/templates/audio/Q_female.mp3 b/templates/audio/Q_female.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..099c51a5bd6b06d685eb519c6a132c4099d9908a Binary files /dev/null and b/templates/audio/Q_female.mp3 differ diff --git a/templates/audio/Q_male.mp3 b/templates/audio/Q_male.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..5ae445ecb0fc5aecb6a05cabbc71b1bb28a3f013 Binary files /dev/null and b/templates/audio/Q_male.mp3 differ diff --git a/templates/audio/R_female.mp3 b/templates/audio/R_female.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..7b2f3edf017e32cbcccb8cee95e7252c2f2d44cf Binary files /dev/null and b/templates/audio/R_female.mp3 differ diff --git a/templates/audio/R_male.mp3 b/templates/audio/R_male.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..7af692b19164414c444d81c3bb83cd6dbb5edc3c Binary files /dev/null and b/templates/audio/R_male.mp3 differ diff --git a/templates/audio/S_female.mp3 b/templates/audio/S_female.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..a78e333d46774cdcfbb9beb00a75b6048656e811 Binary files /dev/null and b/templates/audio/S_female.mp3 differ diff --git a/templates/audio/S_male.mp3 b/templates/audio/S_male.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..66e48c9b9d6e31f7e2e6b0bfe496bcd597161bb0 Binary files /dev/null and b/templates/audio/S_male.mp3 differ diff --git a/templates/audio/T_female.mp3 b/templates/audio/T_female.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..7f5c4e78d9017af8e6087d17a825942b83643c07 Binary files /dev/null and b/templates/audio/T_female.mp3 differ diff --git a/templates/audio/T_male.mp3 b/templates/audio/T_male.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..01c605c8f316707d522a207e1b15176650acabaf Binary files /dev/null and b/templates/audio/T_male.mp3 differ diff --git a/templates/audio/U_female.mp3 b/templates/audio/U_female.mp3 new file mode 
100644 index 0000000000000000000000000000000000000000..face8dd0e7087e454fd24b26aab2c998f95f7698 Binary files /dev/null and b/templates/audio/U_female.mp3 differ diff --git a/templates/audio/U_male.mp3 b/templates/audio/U_male.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..9b68342f849aee0197f672d850dbb4ca1091d96e Binary files /dev/null and b/templates/audio/U_male.mp3 differ diff --git a/templates/audio/V_female.mp3 b/templates/audio/V_female.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..4b803403f470df621236aa1d44ba7d33906ceaa7 Binary files /dev/null and b/templates/audio/V_female.mp3 differ diff --git a/templates/audio/V_male.mp3 b/templates/audio/V_male.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..44de02458840c6761cc6d14a165e7e78fbc2abe5 Binary files /dev/null and b/templates/audio/V_male.mp3 differ diff --git a/templates/audio/W_female.mp3 b/templates/audio/W_female.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..095e4be7023f7676f98f3ba0bc116380c961a1cc Binary files /dev/null and b/templates/audio/W_female.mp3 differ diff --git a/templates/audio/W_male.mp3 b/templates/audio/W_male.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..c99f3dbfae7772aa03d8dd5b2a783537bf658e61 Binary files /dev/null and b/templates/audio/W_male.mp3 differ diff --git a/templates/audio/X_female.mp3 b/templates/audio/X_female.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..128821d9e52873f7b787218b3ed198035ff17326 Binary files /dev/null and b/templates/audio/X_female.mp3 differ diff --git a/templates/audio/X_male.mp3 b/templates/audio/X_male.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..f0120bc52f0edf4f12962332257ac0953f6cff98 Binary files /dev/null and b/templates/audio/X_male.mp3 differ diff --git a/templates/audio/Y_female.mp3 b/templates/audio/Y_female.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..20190c87ca12e5493ae573a63d4508fbfeb6b5a6 Binary files /dev/null and b/templates/audio/Y_female.mp3 differ diff --git a/templates/audio/Y_male.mp3 b/templates/audio/Y_male.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..abe204bec42f617d1c889cc4e49cb6ecedab7378 Binary files /dev/null and b/templates/audio/Y_male.mp3 differ diff --git a/templates/audio/Z_female.mp3 b/templates/audio/Z_female.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..7154d0bcd68ca714b9ebab26671562c5fbc27ae5 Binary files /dev/null and b/templates/audio/Z_female.mp3 differ diff --git a/templates/audio/Z_male.mp3 b/templates/audio/Z_male.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..3d53c45ba1ffa3d62b002ff90b8c2ac7289190c9 Binary files /dev/null and b/templates/audio/Z_male.mp3 differ diff --git a/templates/download.png b/templates/download.png new file mode 100644 index 0000000000000000000000000000000000000000..a401129eb3aadff55e7d299ed7f86dfd02e110bd Binary files /dev/null and b/templates/download.png differ diff --git a/templates/index.html b/templates/index.html new file mode 100644 index 0000000000000000000000000000000000000000..e3cb5c3b3d173df7a94068648e10f0924f1e517a --- /dev/null +++ b/templates/index.html @@ -0,0 +1,314 @@ + + + + + + + IOT Project + + + + + + +
+ <!-- [Markup for templates/index.html was lost when this diff was flattened. The recoverable structure of the "IOT Project" page: a Live Camera Preview panel, a Current Snapshot panel, a Controls panel with a "Select your preferred voice:" option, a Manual Upload section, and an Output section.] -->
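The template's Controls panel exposes a voice preference, and the diff ships one clip per letter and voice under `templates/audio/` (`A_female.mp3` through `Z_male.mp3`). Below is a minimal sketch of how a predicted letter plus the selected voice could be resolved to one of those clips; `audio_clip_path` is a hypothetical helper introduced here for illustration and is not part of this diff, and the repository's own front-end/back-end code may resolve these paths differently.

```python
import os


def audio_clip_path(letter: str, voice: str = "female") -> str:
    """Hypothetical helper: map a predicted letter and a voice choice to
    templates/audio/<LETTER>_<voice>.mp3 (illustration only)."""
    letter = letter.strip().upper()
    if len(letter) != 1 or not letter.isalpha():
        raise ValueError(f"expected a single letter A-Z, got {letter!r}")
    if voice not in ("female", "male"):
        raise ValueError("voice must be 'female' or 'male'")
    return os.path.join("templates", "audio", f"{letter}_{voice}.mp3")


if __name__ == "__main__":
    print(audio_clip_path("a"))          # templates/audio/A_female.mp3
    print(audio_clip_path("Z", "male"))  # templates/audio/Z_male.mp3
```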
+ + + diff --git a/train.py b/train.py new file mode 100644 index 0000000000000000000000000000000000000000..4610b61e423d5bda59c60487d98c435ff24a9ea5 --- /dev/null +++ b/train.py @@ -0,0 +1,1022 @@ +""" +The program applies Transfer Learning to this existing model and re-trains it to classify a new set of images. + +This example shows how to take a Inception v3 architecture model trained on ImageNet images, +and train a new top layer that can recognize other classes of images. + +You can replace the image_dir argument with any folder containing subfolders of +images. The label for each image is taken from the name of the subfolder it's in. + +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import argparse +#from datetime import datetime +import hashlib +import os.path +import random +import re +import struct +import sys +import tarfile + +import numpy as np +from six.moves import urllib +import tensorflow.compat.v1 as tf + +from tensorflow.python.framework import graph_util +from tensorflow.python.framework import tensor_shape +from tensorflow.python.platform import gfile +from tensorflow.python.util import compat + +FLAGS = None + +# These are all parameters that are tied to the particular model architecture +# we're using for Inception v3. These include things like tensor names and their +# sizes. If you want to adapt this script to work with another model, you will +# need to update these to reflect the values in the network you're using. +DATA_URL = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz' + +BOTTLENECK_TENSOR_NAME = 'pool_3/_reshape:0' +BOTTLENECK_TENSOR_SIZE = 2048 +MODEL_INPUT_WIDTH = 299 +MODEL_INPUT_HEIGHT = 299 +MODEL_INPUT_DEPTH = 3 +JPEG_DATA_TENSOR_NAME = 'DecodeJpeg/contents:0' +RESIZED_INPUT_TENSOR_NAME = 'ResizeBilinear:0' +MAX_NUM_IMAGES_PER_CLASS = 2 ** 27 - 1 # ~134M + + +def create_image_lists(image_dir, testing_percentage, validation_percentage): + """ + Brief: + Builds a list of training images from the file system. + Analyzes the sub folders in the image directory, splits them into stable + training, testing, and validation sets, and returns a data structure + describing the lists of images for each label and their paths. + Args: + image_dir: String path to a folder containing subfolders of images. + testing_percentage: Integer percentage of the images to reserve for tests. + validation_percentage: Integer percentage of images reserved for validation. + Returns: + A dictionary containing an entry for each label subfolder, with images split + into training, testing, and validation sets within each label. + """ + if not gfile.Exists(image_dir): + print("Image directory '" + image_dir + "' not found.") + return None + result = {} + sub_dirs = [x[0] for x in gfile.Walk(image_dir)] + # The root directory comes first, so skip it. + is_root_dir = True + for sub_dir in sub_dirs: + if is_root_dir: + is_root_dir = False + continue + extensions = ['jpg', 'jpeg', 'JPG', 'JPEG'] + file_list = [] + dir_name = os.path.basename(sub_dir) + if dir_name == image_dir: + continue + print("Looking for images in '" + dir_name + "'") + for extension in extensions: + file_glob = os.path.join(image_dir, dir_name, '*.' 
+ extension) + file_list.extend(gfile.Glob(file_glob)) + if not file_list: + print('No files found') + continue + if len(file_list) < 20: + print('WARNING: Folder has less than 20 images, which may cause issues.') + elif len(file_list) > MAX_NUM_IMAGES_PER_CLASS: + print('WARNING: Folder {} has more than {} images. Some images will ' + 'never be selected.'.format(dir_name, MAX_NUM_IMAGES_PER_CLASS)) + label_name = re.sub(r'[^a-z0-9]+', ' ', dir_name.lower()) + training_images = [] + testing_images = [] + validation_images = [] + for file_name in file_list: + base_name = os.path.basename(file_name) + # We want to ignore anything after '_nohash_' in the file name when + # deciding which set to put an image in, the data set creator has a way of + # grouping photos that are close variations of each other. For example + # this is used in the plant disease data set to group multiple pictures of + # the same leaf. + hash_name = re.sub(r'_nohash_.*$', '', file_name) + # This looks a bit magical, but we need to decide whether this file should + # go into the training, testing, or validation sets, and we want to keep + # existing files in the same set even if more files are subsequently + # added. + # To do that, we need a stable way of deciding based on just the file name + # itself, so we do a hash of that and then use that to generate a + # probability value that we use to assign it. + hash_name_hashed = hashlib.sha1(compat.as_bytes(hash_name)).hexdigest() + percentage_hash = ((int(hash_name_hashed, 16) % + (MAX_NUM_IMAGES_PER_CLASS + 1)) * + (100.0 / MAX_NUM_IMAGES_PER_CLASS)) + if percentage_hash < validation_percentage: + validation_images.append(base_name) + elif percentage_hash < (testing_percentage + validation_percentage): + testing_images.append(base_name) + else: + training_images.append(base_name) + result[label_name] = { + 'dir': dir_name, + 'training': training_images, + 'testing': testing_images, + 'validation': validation_images, + } + return result + + +def get_image_path(image_lists, label_name, index, image_dir, category): + """" + Brief: + Returns a path to an image for a label at the given index. + Args: + image_lists: Dictionary of training images for each label. + label_name: Label string we want to get an image for. + index: Int offset of the image we want. This will be moduloed by the + available number of images for the label, so it can be arbitrarily large. + image_dir: Root folder string of the subfolders containing the training images. + category: Name string of set to pull images from - training, testing, or validation. + Returns: + File system path string to an image that meets the requested parameters. + """ + if label_name not in image_lists: + tf.logging.fatal('Label does not exist %s.', label_name) + label_lists = image_lists[label_name] + if category not in label_lists: + tf.logging.fatal('Category does not exist %s.', category) + category_list = label_lists[category] + if not category_list: + tf.logging.fatal('Label %s has no images in the category %s.', label_name, category) + mod_index = index % len(category_list) + base_name = category_list[mod_index] + sub_dir = label_lists['dir'] + full_path = os.path.join(image_dir, sub_dir, base_name) + return full_path + + +def get_bottleneck_path(image_lists, label_name, index, bottleneck_dir, category): + """" + Brief: + Returns a path to a bottleneck file for a label at the given index. + Args: + image_lists: Dictionary of training images for each label. + label_name: Label string we want to get an image for. 
+ index: Integer offset of the image we want. This will be moduloed by the + available number of images for the label, so it can be arbitrarily large. + bottleneck_dir: Folder string holding cached files of bottleneck values. + category: Name string of set to pull images from - training, testing, or validation. + Returns: + File system path string to an image that meets the requested parameters. + """ + return get_image_path(image_lists, label_name, index, bottleneck_dir, + category) + '.txt' + + +def create_inception_graph(): + """" + Brief: + Creates a graph from saved GraphDef file and returns a Graph object. + Returns: + Graph holding the trained Inception network, and various tensors we'll be + manipulating. + """ + with tf.Graph().as_default() as graph: + model_filename = os.path.join(FLAGS.model_dir, 'classify_image_graph_def.pb') + with gfile.FastGFile(model_filename, 'rb') as f: + graph_def = tf.GraphDef() + graph_def.ParseFromString(f.read()) + bottleneck_tensor, jpeg_data_tensor, resized_input_tensor = ( + tf.import_graph_def(graph_def, name='', return_elements=[ + BOTTLENECK_TENSOR_NAME, JPEG_DATA_TENSOR_NAME, + RESIZED_INPUT_TENSOR_NAME])) + return graph, bottleneck_tensor, jpeg_data_tensor, resized_input_tensor + + +def run_bottleneck_on_image(sess, image_data, image_data_tensor, bottleneck_tensor): + """" + Brief: + Runs inference on an image to extract the 'bottleneck' summary layer. + Args: + sess: Current active TensorFlow Session. + image_data: String of raw JPEG data. + image_data_tensor: Input data layer in the graph. + bottleneck_tensor: Layer before the final softmax. + Returns: + Numpy array of bottleneck values. + """ + bottleneck_values = sess.run( + bottleneck_tensor, + {image_data_tensor: image_data}) + bottleneck_values = np.squeeze(bottleneck_values) + return bottleneck_values + + +def maybe_download_and_extract(): + """ + Brief: + Download and extract model tar file. + If the pretrained model we're using doesn't already exist, this function + downloads it from the TensorFlow.org website and unpacks it into a directory. + """ + dest_directory = FLAGS.model_dir + if not os.path.exists(dest_directory): + os.makedirs(dest_directory) + filename = DATA_URL.split('/')[-1] + filepath = os.path.join(dest_directory, filename) + if not os.path.exists(filepath): + def _progress(count, block_size, total_size): + sys.stdout.write('\r>> Downloading %s %.1f%%' % + (filename, + float(count * block_size) / float(total_size) * 100.0)) + sys.stdout.flush() + + filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress) + print() + statinfo = os.stat(filepath) + print('Successfully downloaded', filename, statinfo.st_size, 'bytes.') + tarfile.open(filepath, 'r:gz').extractall(dest_directory) + + +def ensure_dir_exists(dir_name): + """ + Brief: + Makes sure the folder exists on disk. + Args: + dir_name: Path string to the folder we want to create. + """ + if not os.path.exists(dir_name): + os.makedirs(dir_name) + + +def write_list_of_floats_to_file(list_of_floats, file_path): + """ + Brief: + Writes a given list of floats to a binary file. + Args: + list_of_floats: List of floats we want to write to a file. + file_path: Path to a file where list of floats will be stored. + """ + s = struct.pack('d' * BOTTLENECK_TENSOR_SIZE, *list_of_floats) + with open(file_path, 'wb') as f: + f.write(s) + + +def read_list_of_floats_from_file(file_path): + """ + Brief: + Reads list of floats from a given file. + Args: + file_path: Path to a file where list of floats was stored. 
+ Returns: + Array of bottleneck values (list of floats). + """ + with open(file_path, 'rb') as f: + s = struct.unpack('d' * BOTTLENECK_TENSOR_SIZE, f.read()) + return list(s) + + +bottleneck_path_2_bottleneck_values = {} + + +def create_bottleneck_file(bottleneck_path, image_lists, label_name, index, + image_dir, category, sess, jpeg_data_tensor, + bottleneck_tensor): + """Create a single bottleneck file.""" + print('Creating bottleneck at ' + bottleneck_path) + image_path = get_image_path(image_lists, label_name, index, + image_dir, category) + if not gfile.Exists(image_path): + tf.logging.fatal('File does not exist %s', image_path) + image_data = gfile.FastGFile(image_path, 'rb').read() + try: + bottleneck_values = run_bottleneck_on_image( + sess, image_data, jpeg_data_tensor, bottleneck_tensor) + except: + raise RuntimeError('Error during processing file %s' % image_path) + + bottleneck_string = ','.join(str(x) for x in bottleneck_values) + with open(bottleneck_path, 'w') as bottleneck_file: + bottleneck_file.write(bottleneck_string) + + +def get_or_create_bottleneck(sess, image_lists, label_name, index, image_dir, + category, bottleneck_dir, jpeg_data_tensor, + bottleneck_tensor): + """ + Brief: + Retrieves or calculates bottleneck values for an image. + + If a cached version of the bottleneck data exists on-disk, return that, + otherwise calculate the data and save it to disk for future use. + Args: + sess: The current active TensorFlow Session. + image_lists: Dictionary of training images for each label. + label_name: Label string we want to get an image for. + index: Integer offset of the image we want. This will be modulo-ed by the + available number of images for the label, so it can be arbitrarily large. + image_dir: Root folder string of the subfolders containing the training + images. + category: Name string of which set to pull images from - training, testing, + or validation. + bottleneck_dir: Folder string holding cached files of bottleneck values. + jpeg_data_tensor: The tensor to feed loaded jpeg data into. + bottleneck_tensor: The output tensor for the bottleneck values. + Returns: + Numpy array of values produced by the bottleneck layer for the image. 
+ """ + label_lists = image_lists[label_name] + sub_dir = label_lists['dir'] + sub_dir_path = os.path.join(bottleneck_dir, sub_dir) + ensure_dir_exists(sub_dir_path) + bottleneck_path = get_bottleneck_path(image_lists, label_name, index, + bottleneck_dir, category) + if not os.path.exists(bottleneck_path): + create_bottleneck_file(bottleneck_path, image_lists, label_name, index, + image_dir, category, sess, jpeg_data_tensor, + bottleneck_tensor) + with open(bottleneck_path, 'r') as bottleneck_file: + bottleneck_string = bottleneck_file.read() + did_hit_error = False + try: + bottleneck_values = [float(x) for x in bottleneck_string.split(',')] + except ValueError: + print('Invalid float found, recreating bottleneck') + did_hit_error = True + if did_hit_error: + create_bottleneck_file(bottleneck_path, image_lists, label_name, index, + image_dir, category, sess, jpeg_data_tensor, + bottleneck_tensor) + with open(bottleneck_path, 'r') as bottleneck_file: + bottleneck_string = bottleneck_file.read() + # Allow exceptions to propagate here, since they shouldn't happen after a + # fresh creation + bottleneck_values = [float(x) for x in bottleneck_string.split(',')] + return bottleneck_values + + +def cache_bottlenecks(sess, image_lists, image_dir, bottleneck_dir, + jpeg_data_tensor, bottleneck_tensor): + """ + Brief: + Ensures all the training, testing, and validation bottlenecks are cached. + + Because we're likely to read the same image multiple times (if there are no + distortions applied during training) it can speed things up a lot if we + calculate the bottleneck layer values once for each image during + preprocessing, and then just read those cached values repeatedly during + training. Here we go through all the images we've found, calculate those + values, and save them off. + Args: + sess: The current active TensorFlow Session. + image_lists: Dictionary of training images for each label. + image_dir: Root folder string of the subfolders containing the training + images. + bottleneck_dir: Folder string holding cached files of bottleneck values. + jpeg_data_tensor: Input tensor for jpeg data from file. + bottleneck_tensor: The penultimate output layer of the graph. + Returns: + Nothing. + """ + how_many_bottlenecks = 0 + ensure_dir_exists(bottleneck_dir) + for label_name, label_lists in image_lists.items(): + for category in ['training', 'testing', 'validation']: + category_list = label_lists[category] + for index, unused_base_name in enumerate(category_list): + get_or_create_bottleneck(sess, image_lists, label_name, index, + image_dir, category, bottleneck_dir, + jpeg_data_tensor, bottleneck_tensor) + + how_many_bottlenecks += 1 + if how_many_bottlenecks % 100 == 0: + print(str(how_many_bottlenecks) + ' bottleneck files created.') + + +def get_random_cached_bottlenecks(sess, image_lists, how_many, category, + bottleneck_dir, image_dir, jpeg_data_tensor, + bottleneck_tensor): + """ + Brief: + Retrieves bottleneck values for cached images. + + If no distortions are being applied, this function can retrieve the cached + bottleneck values directly from disk for images. It picks a random set of + images from the specified category. + Args: + sess: Current TensorFlow Session. + image_lists: Dictionary of training images for each label. + how_many: If positive, a random sample of this size will be chosen. + If negative, all bottlenecks will be retrieved. + category: Name string of which set to pull from - training, testing, or + validation. 
+ bottleneck_dir: Folder string holding cached files of bottleneck values. + image_dir: Root folder string of the subfolders containing the training + images. + jpeg_data_tensor: The layer to feed jpeg image data into. + bottleneck_tensor: The bottleneck output layer of the CNN graph. + Returns: + List of bottleneck arrays, their corresponding ground truths, and the + relevant filenames. + """ + class_count = len(image_lists.keys()) + bottlenecks = [] + ground_truths = [] + filenames = [] + if how_many >= 0: + # Retrieve a random sample of bottlenecks. + for unused_i in range(how_many): + label_index = random.randrange(class_count) + label_name = list(image_lists.keys())[label_index] + image_index = random.randrange(MAX_NUM_IMAGES_PER_CLASS + 1) + image_name = get_image_path(image_lists, label_name, image_index, + image_dir, category) + bottleneck = get_or_create_bottleneck(sess, image_lists, label_name, + image_index, image_dir, category, + bottleneck_dir, jpeg_data_tensor, + bottleneck_tensor) + ground_truth = np.zeros(class_count, dtype=np.float32) + ground_truth[label_index] = 1.0 + bottlenecks.append(bottleneck) + ground_truths.append(ground_truth) + filenames.append(image_name) + else: + # Retrieve all bottlenecks. + for label_index, label_name in enumerate(image_lists.keys()): + for image_index, image_name in enumerate( + image_lists[label_name][category]): + image_name = get_image_path(image_lists, label_name, image_index, + image_dir, category) + bottleneck = get_or_create_bottleneck(sess, image_lists, label_name, + image_index, image_dir, category, + bottleneck_dir, jpeg_data_tensor, + bottleneck_tensor) + ground_truth = np.zeros(class_count, dtype=np.float32) + ground_truth[label_index] = 1.0 + bottlenecks.append(bottleneck) + ground_truths.append(ground_truth) + filenames.append(image_name) + return bottlenecks, ground_truths, filenames + + +def get_random_distorted_bottlenecks( + sess, image_lists, how_many, category, image_dir, input_jpeg_tensor, + distorted_image, resized_input_tensor, bottleneck_tensor): + """ + Brief: + Retrieves bottleneck values for training images, after distortions. + + If we're training with distortions like crops, scales, or flips, we have to + recalculate the full model for every image, and so we can't use cached + bottleneck values. Instead we find random images for the requested category, + run them through the distortion graph, and then the full graph to get the + bottleneck results for each. + Args: + sess: Current TensorFlow Session. + image_lists: Dictionary of training images for each label. + how_many: The integer number of bottleneck values to return. + category: Name string of which set of images to fetch - training, testing, + or validation. + image_dir: Root folder string of the subfolders containing the training + images. + input_jpeg_tensor: The input layer we feed the image data to. + distorted_image: The output node of the distortion graph. + resized_input_tensor: The input node of the recognition graph. + bottleneck_tensor: The bottleneck output layer of the CNN graph. + Returns: + List of bottleneck arrays and their corresponding ground truths. 
+ """ + class_count = len(image_lists.keys()) + bottlenecks = [] + ground_truths = [] + for unused_i in range(how_many): + label_index = random.randrange(class_count) + label_name = list(image_lists.keys())[label_index] + image_index = random.randrange(MAX_NUM_IMAGES_PER_CLASS + 1) + image_path = get_image_path(image_lists, label_name, image_index, image_dir, + category) + if not gfile.Exists(image_path): + tf.logging.fatal('File does not exist %s', image_path) + jpeg_data = gfile.FastGFile(image_path, 'rb').read() + # Note that we materialize the distorted_image_data as a numpy array before + # sending running inference on the image. This involves 2 memory copies and + # might be optimized in other implementations. + distorted_image_data = sess.run(distorted_image, + {input_jpeg_tensor: jpeg_data}) + bottleneck = run_bottleneck_on_image(sess, distorted_image_data, + resized_input_tensor, + bottleneck_tensor) + ground_truth = np.zeros(class_count, dtype=np.float32) + ground_truth[label_index] = 1.0 + bottlenecks.append(bottleneck) + ground_truths.append(ground_truth) + return bottlenecks, ground_truths + + +def should_distort_images(flip_left_right, random_crop, random_scale, + random_brightness): + """ + Brief: + Whether any distortions are enabled, from the input flags. + Args: + flip_left_right: Boolean whether to randomly mirror images horizontally. + random_crop: Integer percentage setting the total margin used around the + crop box. + random_scale: Integer percentage of how much to vary the scale by. + random_brightness: Integer range to randomly multiply the pixel values by. + Returns: + Boolean value indicating whether any distortions should be applied. + """ + return (flip_left_right or (random_crop != 0) or (random_scale != 0) or + (random_brightness != 0)) + + +def add_input_distortions(flip_left_right, random_crop, random_scale, + random_brightness): + """ + Brief: + Creates the operations to apply the specified distortions. + + During training it can help to improve the results if we run the images + through simple distortions like crops, scales, and flips. These reflect the + kind of variations we expect in the real world, and so can help train the + model to cope with natural data more effectively. Here we take the supplied + parameters and construct a network of operations to apply them to an image. + + Cropping + + Cropping is done by placing a bounding box at a random position in the full + image. The cropping parameter controls the size of that box relative to the + input image. If it's zero, then the box is the same size as the input and no + cropping is performed. If the value is 50%, then the crop box will be half the + width and height of the input. In a diagram it looks like this: + + < width > + +---------------------+ + | | + | width - crop% | + | < > | + | +------+ | + | | | | + | | | | + | | | | + | +------+ | + | | + | | + +---------------------+ + + Scaling + + Scaling is a lot like cropping, except that the bounding box is always + centered and its size varies randomly within the given range. For example if + the scale percentage is zero, then the bounding box is the same size as the + input and no scaling is applied. If it's 50%, then the bounding box will be in + a random range between half the width and height and full size. + Args: + flip_left_right: Boolean whether to randomly mirror images horizontally. + random_crop: Integer percentage setting the total margin used around the + crop box. 
+ random_scale: Integer percentage of how much to vary the scale by. + random_brightness: Integer range to randomly multiply the pixel values by. + graph. + Returns: + The jpeg input layer and the distorted result tensor. + """ + + jpeg_data = tf.placeholder(tf.string, name='DistortJPGInput') + decoded_image = tf.image.decode_jpeg(jpeg_data, channels=MODEL_INPUT_DEPTH) + decoded_image_as_float = tf.cast(decoded_image, dtype=tf.float32) + decoded_image_4d = tf.expand_dims(decoded_image_as_float, 0) + margin_scale = 1.0 + (random_crop / 100.0) + resize_scale = 1.0 + (random_scale / 100.0) + margin_scale_value = tf.constant(margin_scale) + resize_scale_value = tf.random_uniform(tensor_shape.scalar(), + minval=1.0, + maxval=resize_scale) + scale_value = tf.multiply(margin_scale_value, resize_scale_value) + precrop_width = tf.multiply(scale_value, MODEL_INPUT_WIDTH) + precrop_height = tf.multiply(scale_value, MODEL_INPUT_HEIGHT) + precrop_shape = tf.stack([precrop_height, precrop_width]) + precrop_shape_as_int = tf.cast(precrop_shape, dtype=tf.int32) + precropped_image = tf.image.resize_bilinear(decoded_image_4d, + precrop_shape_as_int) + precropped_image_3d = tf.squeeze(precropped_image, squeeze_dims=[0]) + cropped_image = tf.random_crop(precropped_image_3d, + [MODEL_INPUT_HEIGHT, MODEL_INPUT_WIDTH, + MODEL_INPUT_DEPTH]) + if flip_left_right: + flipped_image = tf.image.random_flip_left_right(cropped_image) + else: + flipped_image = cropped_image + brightness_min = 1.0 - (random_brightness / 100.0) + brightness_max = 1.0 + (random_brightness / 100.0) + brightness_value = tf.random_uniform(tensor_shape.scalar(), + minval=brightness_min, + maxval=brightness_max) + brightened_image = tf.multiply(flipped_image, brightness_value) + distort_result = tf.expand_dims(brightened_image, 0, name='DistortResult') + return jpeg_data, distort_result + + +def variable_summaries(var): + """Attach a lot of summaries to a Tensor (for TensorBoard visualization).""" + with tf.name_scope('summaries'): + mean = tf.reduce_mean(var) + tf.summary.scalar('mean', mean) + with tf.name_scope('stddev'): + stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean))) + tf.summary.scalar('stddev', stddev) + tf.summary.scalar('max', tf.reduce_max(var)) + tf.summary.scalar('min', tf.reduce_min(var)) + tf.summary.histogram('histogram', var) + + +def add_final_training_ops(class_count, final_tensor_name, bottleneck_tensor): + """ + Brief: + Adds a new softmax and fully-connected layer for training. + + We need to retrain the top layer to identify our new classes, so this function + adds the right operations to the graph, along with some variables to hold the + weights, and then sets up all the gradients for the backward pass. + + The set up for the softmax and fully-connected layers is based on: + https://tensorflow.org/versions/master/tutorials/mnist/beginners/index.html + Args: + class_count: Integer of how many categories of things we're trying to + recognize. + final_tensor_name: Name string for the new final node that produces results. + bottleneck_tensor: The output of the main CNN graph. + Returns: + The tensors for the training and cross entropy results, and tensors for the + bottleneck input and ground truth input. 
+ """ + with tf.name_scope('input'): + bottleneck_input = tf.placeholder_with_default( + bottleneck_tensor, shape=[None, BOTTLENECK_TENSOR_SIZE], + name='BottleneckInputPlaceholder') + + ground_truth_input = tf.placeholder(tf.float32, + [None, class_count], + name='GroundTruthInput') + + # Organizing the following ops as `final_training_ops` so they're easier + # to see in TensorBoard + layer_name = 'final_training_ops' + with tf.name_scope(layer_name): + with tf.name_scope('weights'): + initial_value = tf.truncated_normal([BOTTLENECK_TENSOR_SIZE, class_count], + stddev=0.001) + + layer_weights = tf.Variable(initial_value, name='final_weights') + + variable_summaries(layer_weights) + with tf.name_scope('biases'): + layer_biases = tf.Variable(tf.zeros([class_count]), name='final_biases') + variable_summaries(layer_biases) + with tf.name_scope('Wx_plus_b'): + logits = tf.matmul(bottleneck_input, layer_weights) + layer_biases + tf.summary.histogram('pre_activations', logits) + + final_tensor = tf.nn.softmax(logits, name=final_tensor_name) + tf.summary.histogram('activations', final_tensor) + + with tf.name_scope('cross_entropy'): + cross_entropy = tf.nn.softmax_cross_entropy_with_logits( + labels=ground_truth_input, logits=logits) + with tf.name_scope('total'): + cross_entropy_mean = tf.reduce_mean(cross_entropy) + tf.summary.scalar('cross_entropy', cross_entropy_mean) + + with tf.name_scope('train'): + optimizer = tf.train.GradientDescentOptimizer(FLAGS.learning_rate) + train_step = optimizer.minimize(cross_entropy_mean) + + return (train_step, cross_entropy_mean, bottleneck_input, ground_truth_input, + final_tensor) + + +def add_evaluation_step(result_tensor, ground_truth_tensor): + """ + Brief: + Inserts the operations we need to evaluate the accuracy of our results. + Args: + result_tensor: The new final node that produces results. + ground_truth_tensor: The node we feed ground truth data + into. + Returns: + Tuple of (evaluation step, prediction). + """ + with tf.name_scope('accuracy'): + with tf.name_scope('correct_prediction'): + prediction = tf.argmax(result_tensor, 1) + correct_prediction = tf.equal( + prediction, tf.argmax(ground_truth_tensor, 1)) + with tf.name_scope('accuracy'): + evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) + tf.summary.scalar('accuracy', evaluation_step) + return evaluation_step, prediction + + +def main(_): + # Setup the directory we'll write summaries to for TensorBoard + if tf.gfile.Exists(FLAGS.summaries_dir): + tf.gfile.DeleteRecursively(FLAGS.summaries_dir) + tf.gfile.MakeDirs(FLAGS.summaries_dir) + + # Set up the pre-trained graph. + maybe_download_and_extract() + graph, bottleneck_tensor, jpeg_data_tensor, resized_image_tensor = ( + create_inception_graph()) + + # Look at the folder structure, and create lists of all the images. + image_lists = create_image_lists(FLAGS.image_dir, FLAGS.testing_percentage, + FLAGS.validation_percentage) + class_count = len(image_lists.keys()) + if class_count == 0: + print('No valid folders of images found at ' + FLAGS.image_dir) + return -1 + if class_count == 1: + print('Only one valid folder of images found at ' + FLAGS.image_dir + + ' - multiple classes are needed for classification.') + return -1 + + # See if the command-line flags mean we're applying any distortions. 
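+ # When any distortion flag is set, the cached bottlenecks cannot be reused:
+ # each training batch below is re-run through the full Inception graph,
+ # which is much slower than reading the cached bottleneck .txt files.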
+ do_distort_images = should_distort_images( + FLAGS.flip_left_right, FLAGS.random_crop, FLAGS.random_scale, + FLAGS.random_brightness) + + with tf.Session(graph=graph) as sess: + + if do_distort_images: + # We will be applying distortions, so setup the operations we'll need. + (distorted_jpeg_data_tensor, + distorted_image_tensor) = add_input_distortions( + FLAGS.flip_left_right, FLAGS.random_crop, + FLAGS.random_scale, FLAGS.random_brightness) + else: + # We'll make sure we've calculated the 'bottleneck' image summaries and + # cached them on disk. + cache_bottlenecks(sess, image_lists, FLAGS.image_dir, + FLAGS.bottleneck_dir, jpeg_data_tensor, + bottleneck_tensor) + + # Add the new layer that we'll be training. + (train_step, cross_entropy, bottleneck_input, ground_truth_input, + final_tensor) = add_final_training_ops(len(image_lists.keys()), + FLAGS.final_tensor_name, + bottleneck_tensor) + + # Create the operations we need to evaluate the accuracy of our new layer. + evaluation_step, prediction = add_evaluation_step( + final_tensor, ground_truth_input) + + # Merge all the summaries and write them out to the summaries_dir + merged = tf.summary.merge_all() + train_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/train', + sess.graph) + + validation_writer = tf.summary.FileWriter( + FLAGS.summaries_dir + '/validation') + + # Set up all our weights to their initial default values. + init = tf.global_variables_initializer() + sess.run(init) + + # Run the training for as many cycles as requested on the command line. + for i in range(FLAGS.how_many_training_steps): + # Get a batch of input bottleneck values, either calculated fresh every + # time with distortions applied, or from the cache stored on disk. + if do_distort_images: + (train_bottlenecks, + train_ground_truth) = get_random_distorted_bottlenecks( + sess, image_lists, FLAGS.train_batch_size, 'training', + FLAGS.image_dir, distorted_jpeg_data_tensor, + distorted_image_tensor, resized_image_tensor, bottleneck_tensor) + else: + (train_bottlenecks, + train_ground_truth, _) = get_random_cached_bottlenecks( + sess, image_lists, FLAGS.train_batch_size, 'training', + FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor, + bottleneck_tensor) + # Feed the bottlenecks and ground truth into the graph, and run a training + # step. Capture training summaries for TensorBoard with the `merged` op. + + train_summary, _ = sess.run( + [merged, train_step], + feed_dict={bottleneck_input: train_bottlenecks, + ground_truth_input: train_ground_truth}) + train_writer.add_summary(train_summary, i) + + # Every so often, print out how well the graph is training. + is_last_step = (i + 1 == FLAGS.how_many_training_steps) + if (i % FLAGS.eval_step_interval) == 0 or is_last_step: + train_accuracy, cross_entropy_value = sess.run( + [evaluation_step, cross_entropy], + feed_dict={bottleneck_input: train_bottlenecks, + ground_truth_input: train_ground_truth}) + validation_bottlenecks, validation_ground_truth, _ = ( + get_random_cached_bottlenecks( + sess, image_lists, FLAGS.validation_batch_size, 'validation', + FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor, + bottleneck_tensor)) + # Run a validation step and capture training summaries for TensorBoard + # with the `merged` op. 
+ validation_summary, validation_accuracy = sess.run( + [merged, evaluation_step], + feed_dict={bottleneck_input: validation_bottlenecks, + ground_truth_input: validation_ground_truth}) + validation_writer.add_summary(validation_summary, i) + print('Step: %d, Train accuracy: %.4f%%, Cross entropy: %f, Validation accuracy: %.1f%% (N=%d)' % (i, + train_accuracy * 100, cross_entropy_value, validation_accuracy * 100, len(validation_bottlenecks))) + + # We've completed all our training, so run a final test evaluation on + # some new images we haven't used before. + test_bottlenecks, test_ground_truth, test_filenames = ( + get_random_cached_bottlenecks(sess, image_lists, FLAGS.test_batch_size, + 'testing', FLAGS.bottleneck_dir, + FLAGS.image_dir, jpeg_data_tensor, + bottleneck_tensor)) + test_accuracy, predictions = sess.run( + [evaluation_step, prediction], + feed_dict={bottleneck_input: test_bottlenecks, + ground_truth_input: test_ground_truth}) + print('Final test accuracy = %.1f%% (N=%d)' % ( + test_accuracy * 100, len(test_bottlenecks))) + + if FLAGS.print_misclassified_test_images: + print('=== MISCLASSIFIED TEST IMAGES ===') + for i, test_filename in enumerate(test_filenames): + if predictions[i] != test_ground_truth[i].argmax(): + print('%70s %s' % (test_filename, + list(image_lists.keys())[predictions[i]])) + + # Write out the trained graph and labels with the weights stored as + # constants. + output_graph_def = graph_util.convert_variables_to_constants( + sess, graph.as_graph_def(), [FLAGS.final_tensor_name]) + with gfile.FastGFile(FLAGS.output_graph, 'wb') as f: + f.write(output_graph_def.SerializeToString()) + with gfile.FastGFile(FLAGS.output_labels, 'w') as f: + f.write('\n'.join(image_lists.keys()) + '\n') + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument( + '--image_dir', + type=str, + default='', + help='Path to folders of labeled images.' + ) + parser.add_argument( + '--output_graph', + type=str, + default='logs/output_graph.pb', + help='Where to save the trained graph.' + ) + parser.add_argument( + '--output_labels', + type=str, + default='logs/output_labels.txt', + help='Where to save the trained graph\'s labels.' + ) + parser.add_argument( + '--summaries_dir', + type=str, + default='logs/retrain_logs', + help='Where to save summary logs for TensorBoard.' + ) + parser.add_argument( + '--how_many_training_steps', + type=int, + default=5000, + help='How many training steps to run before ending.' + ) + parser.add_argument( + '--learning_rate', + type=float, + default=0.01, + help='How large a learning rate to use when training.' + ) + parser.add_argument( + '--testing_percentage', + type=int, + default=10, + help='What percentage of images to use as a test set.' + ) + parser.add_argument( + '--validation_percentage', + type=int, + default=10, + help='What percentage of images to use as a validation set.' + ) + parser.add_argument( + '--eval_step_interval', + type=int, + default=100, + help='How often to evaluate the training results.' + ) + parser.add_argument( + '--train_batch_size', + type=int, + default=100, + help='How many images to train on at a time.' + ) + parser.add_argument( + '--test_batch_size', + type=int, + default=-1, + help="""\ + How many images to test on. This test set is only used once, to evaluate + the final accuracy of the model after training completes. 
+ A value of -1 causes the entire test set to be used, which leads to more + stable results across runs.\ + """ + ) + parser.add_argument( + '--validation_batch_size', + type=int, + default=100, + help="""\ + How many images to use in an evaluation batch. This validation set is + used much more often than the test set, and is an early indicator of how + accurate the model is during training. + A value of -1 causes the entire validation set to be used, which leads to + more stable results across training iterations, but may be slower on large + training sets.\ + """ + ) + parser.add_argument( + '--print_misclassified_test_images', + default=False, + help="""\ + Whether to print out a list of all misclassified test images.\ + """, + action='store_true' + ) + parser.add_argument( + '--model_dir', + type=str, + default='logs/imagenet', + help="""\ + Path to classify_image_graph_def.pb, + imagenet_synset_to_human_label_map.txt, and + imagenet_2012_challenge_label_map_proto.pbtxt.\ + """ + ) + parser.add_argument( + '--bottleneck_dir', + type=str, + default='/tmp/bottleneck', + help='Path to cache bottleneck layer values as files.' + ) + parser.add_argument( + '--final_tensor_name', + type=str, + default='final_result', + help="""\ + The name of the output classification layer in the retrained graph.\ + """ + ) + parser.add_argument( + '--flip_left_right', + default=False, + help="""\ + Whether to randomly flip half of the training images horizontally.\ + """, + action='store_true' + ) + parser.add_argument( + '--random_crop', + type=int, + default=0, + help="""\ + A percentage determining how much of a margin to randomly crop off the + training images.\ + """ + ) + parser.add_argument( + '--random_scale', + type=int, + default=0, + help="""\ + A percentage determining how much to randomly scale up the size of the + training images by.\ + """ + ) + parser.add_argument( + '--random_brightness', + type=int, + default=0, + help="""\ + A percentage determining how much to randomly multiply the training image + input pixels up or down by.\ + """ + ) + FLAGS, unparsed = parser.parse_known_args() + tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
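
Once train.py finishes, it freezes the retrained graph with `graph_util.convert_variables_to_constants` into the file named by `--output_graph` and writes the class labels to `--output_labels`, so inference only needs those two artifacts. Here is a minimal sketch of loading them and scoring a single JPEG, assuming the script's defaults (`logs/output_graph.pb`, `logs/output_labels.txt`, and the `final_result` output tensor); it is an illustration, not the repository's own inference code.

```python
import sys

import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()

GRAPH_PATH = "logs/output_graph.pb"     # train.py --output_graph default
LABELS_PATH = "logs/output_labels.txt"  # train.py --output_labels default


def classify(image_path):
    # Labels were written one per line by train.py.
    labels = [line.rstrip() for line in tf.gfile.GFile(LABELS_PATH)]

    # Load the frozen graph produced by convert_variables_to_constants.
    with tf.gfile.GFile(GRAPH_PATH, "rb") as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
    tf.import_graph_def(graph_def, name="")

    image_data = tf.gfile.GFile(image_path, "rb").read()
    with tf.Session() as sess:
        # 'final_result' is train.py's default --final_tensor_name; the JPEG is
        # fed through the same 'DecodeJpeg/contents:0' input used during training.
        softmax = sess.graph.get_tensor_by_name("final_result:0")
        predictions = sess.run(softmax, {"DecodeJpeg/contents:0": image_data})[0]

    # Print every label with its softmax score, highest first.
    for node_id in predictions.argsort()[::-1]:
        print("%s (score = %.5f)" % (labels[node_id], predictions[node_id]))


if __name__ == "__main__":
    classify(sys.argv[1])
```

Because the retrained weights are baked into the `.pb` file as constants, nothing else from the training run (summary logs, the bottleneck cache) is needed at inference time.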