# traffic_signal_images.py: loading script for the Singapore traffic image dataset
from datetime import datetime, timedelta  # used by the original fetch code kept below

import datasets
import pandas as pd
import requests  # used by the original fetch code kept below
# TODO: Add BibTeX citation
# Find for instance the citation on arxiv or on the dataset repo/website
_CITATION = """\
@InProceedings{huggingface:dataset,
title = {Singapore Traffic Image Dataset},
author={huggingface, Inc.
},
year={2023}
}
"""
_DESCRIPTION = """\
This dataset contains traffic images from traffic signal cameras in Singapore. The images were captured at 1.5-minute intervals between 6 pm and 7 pm every day during January 2024.
"""
_HOMEPAGE = "https://beta.data.gov.sg/collections/354/view"
# _URL = "https://raw.githubusercontent.com/Sayali-pingle/HuggingFace--Traffic-Image-Dataset/main/camera_data.csv"
class TrafficSignalImages(datasets.GeneratorBasedBuilder):
"""My dataset is in the form of CSV file hosted on my github. It contains traffic images from 1st Jan 2024 to 31st Jan 2024 from 6 to 7 pm everyday. The original code to fetch these images has been commented in the generate_examples function."""
VERSION = datasets.Version("1.1.0")
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"timestamp": datasets.Value("string"),
"camera_id": datasets.Value("string"),
"latitude": datasets.Value("float"),
"longitude": datasets.Value("float"),
"image_url": datasets.Image(),
"image_metadata": datasets.Value("string")
}
),
homepage=_HOMEPAGE,
citation=_CITATION,
)
def _split_generators(self, dl_manager: datasets.DownloadManager):
        # The CSV of pre-fetched camera records is hosted on GitHub.
        urls_to_download = {
            "csv_file": "https://raw.githubusercontent.com/Sayali-pingle/HuggingFace--Traffic-Image-Dataset/main/camera_data.csv"
        }
        csv_path = dl_manager.download_and_extract(urls_to_download["csv_file"])
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "csv_file_path": csv_path,
                },
            ),
        ]
def _generate_examples(self, csv_file_path):
        # Yields one example per row of the pre-fetched CSV.
        # --- Original data-collection code, kept here for reference ---
        # It queried the data.gov.sg traffic-images API once per timestamp and
        # collected one record per camera; the results were saved to the CSV
        # that this loader now reads.
        # start_date = datetime(2024, 1, 1, 18, 0, 0)
# end_date = datetime(2024, 1, 2, 19, 0, 0)
# interval_seconds = 240
# date_time_strings = [
# (current_date + timedelta(seconds=seconds)).strftime('%Y-%m-%dT%H:%M:%S+08:00')
# for current_date in pd.date_range(start=start_date, end=end_date, freq='D')
# for seconds in range(0, 3600, interval_seconds)
# ]
# url = 'https://api.data.gov.sg/v1/transport/traffic-images'
# camera_data = []
# for date_time in date_time_strings:
# params = {'date_time': date_time}
# response = requests.get(url, params=params)
# if response.status_code == 200:
# data = response.json()
# camera_data.extend([
# {
# 'timestamp': item['timestamp'],
# 'camera_id': camera['camera_id'],
# 'latitude': camera['location']['latitude'],
# 'longitude': camera['location']['longitude'],
# 'image_url': camera['image'],
# 'image_metadata': camera['image_metadata']
# }
# for item in data['items']
# for camera in item['cameras']
# ])
# else:
# print(f"Error: {response.status_code}")
        camera_data = pd.read_csv(csv_file_path)
for idx, example in camera_data.iterrows():
yield idx, {
"timestamp": example["timestamp"],
"camera_id": example["camera_id"],
"latitude": example["latitude"],
"longitude": example["longitude"],
"image_url": example["image_url"],
"image_metadata": example["image_metadata"]
}
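

# Minimal usage sketch (not part of the builder itself): a quick smoke test for
# the loader above. The repository id "Sayali9141/traffic_signal_images" is an
# assumption inferred from the file path; adjust it to wherever this script is
# actually hosted. Recent versions of `datasets` may also require passing
# trust_remote_code=True for script-based datasets.
if __name__ == "__main__":
    ds = datasets.load_dataset("Sayali9141/traffic_signal_images", split="train")
    print(ds)       # row count and column names
    print(ds[0])    # first example: timestamp, camera_id, coordinates, image, metadata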