@startuml async_dataloader
header Async Dataloader
title Async Dataloader

participant main_process
participant async_process
participant get_data_thread
participant job_queue
participant worker_process_0
participant ...
participant worker_process_n
participant async_train_queue
participant cuda_thread
participant cuda_queue
autonumber
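
' Initialization: main_process starts async_process, get_data_thread,
' the worker processes (when num_workers > 1) and, if use_cuda is set,
' cuda_thread, and creates the queues that connect them.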

main_process -> async_process: Start async_process
main_process -> get_data_thread: Start get_data_thread
alt num_workers > 1
    main_process -> job_queue: Init job_queue
    main_process -> worker_process_0: Start worker_process_0
    main_process -> ...: Start ...
    main_process -> worker_process_n: Start worker_process_n
end
main_process -> async_train_queue: Init async_train_queue
alt use_cuda
    main_process -> cuda_thread: Start cuda_thread
    main_process -> cuda_queue: Init cuda_queue
end
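
' Data fetching: async_process asks get_data_thread for the next batch,
' which is read from "data_source" and returned on the CPU.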

async_process -> get_data_thread: Send request "get_data"
get_data_thread -> get_data_thread: Get data from "data_source"
get_data_thread -> async_process: Send data (in CPU)
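
' Pre-processing: with a single worker the async process processes the
' data itself; otherwise the task is split into sub-tasks that the
' worker processes consume from job_queue.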

alt num_workers <= 1
    async_process -> async_process: Process data
    async_process -> async_train_queue: Put data in queue
else num_workers > 1
    async_process -> async_process: Chunk pre-processing task into sub-tasks
    async_process -> job_queue: Put sub-tasks in queue
    worker_process_0 -> job_queue: Get a sub-task from queue
    worker_process_n -> job_queue: Get a sub-task from queue
    worker_process_0 -> worker_process_0: Process data
    worker_process_n -> worker_process_n: Process data
    worker_process_0 -> async_train_queue: Put data in queue
    worker_process_n -> async_train_queue: Put data in queue
end
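
' CUDA transfer: cuda_thread takes processed data from async_train_queue,
' moves it from CPU to GPU and puts the GPU data into cuda_queue.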

alt use_cuda
    cuda_thread -> async_train_queue: Get data (in CPU)
    cuda_thread -> cuda_thread: Move data from CPU to GPU
    cuda_thread -> cuda_queue: Put data (in GPU) in queue
end

@enduml
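
The flow above can be summarised by the following minimal Python sketch. It only illustrates the producer/worker/queue pattern shown in the diagram and is not the real dataloader implementation: get_data, preprocess, the chunk size and the queue types are placeholder assumptions, and the CPU-to-GPU move is simulated.

# Minimal sketch only: toy stand-ins for the real components.
import multiprocessing as mp
import queue
import threading


def get_data(batch_idx):
    # Stand-in for get_data_thread reading a batch from "data_source" (CPU data).
    return list(range(batch_idx * 4, (batch_idx + 1) * 4))


def preprocess(chunk):
    # Stand-in for the pre-processing done on one sub-task.
    return [x * x for x in chunk]


def worker_loop(job_queue, async_train_queue):
    # worker_process_k: take sub-tasks from job_queue, process them,
    # and put the results into async_train_queue.
    while True:
        chunk = job_queue.get()
        if chunk is None:  # shutdown signal
            break
        async_train_queue.put(preprocess(chunk))


def cuda_loop(async_train_queue, cuda_queue):
    # cuda_thread: move processed data "from CPU to GPU" (simulated here)
    # and put it into cuda_queue.
    while True:
        data = async_train_queue.get()
        cuda_queue.put(("gpu", data))  # placeholder for tensor.cuda()/.to(device)


if __name__ == "__main__":
    num_workers, use_cuda, num_batches = 2, True, 3
    job_queue = mp.Queue()
    async_train_queue = mp.Queue()
    cuda_queue = queue.Queue()

    workers = [
        mp.Process(target=worker_loop, args=(job_queue, async_train_queue))
        for _ in range(num_workers)
    ]
    for w in workers:
        w.start()
    if use_cuda:
        threading.Thread(
            target=cuda_loop, args=(async_train_queue, cuda_queue), daemon=True
        ).start()

    # async_process: fetch each batch, chunk the pre-processing task into
    # sub-tasks and hand them to the workers via job_queue.
    for batch_idx in range(num_batches):
        data = get_data(batch_idx)
        for i in range(0, len(data), 2):
            job_queue.put(data[i:i + 2])

    # Consume the results (2 sub-tasks per batch).
    out_queue = cuda_queue if use_cuda else async_train_queue
    for _ in range(num_batches * 2):
        print(out_queue.get())

    for _ in workers:
        job_queue.put(None)
    for w in workers:
        w.join()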