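# DSASearch Engine: a Streamlit app that takes a code snippet, embeds it with a
# CodeBERT-based model, and looks up links to similar LeetCode questions in a
# FAISS vector index. Run locally with `streamlit run <this file>`.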
import io
import pickle

import streamlit as st
import torch
from bs4 import BeautifulSoup
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import FAISS

class CPU_Unpickler(pickle.Unpickler):
    """Unpickler that remaps torch storages pickled on a GPU back onto the CPU."""

    def find_class(self, module, name):
        if module == 'torch.storage' and name == '_load_from_bytes':
            return lambda b: torch.load(io.BytesIO(b), map_location='cpu')
        return super().find_class(module, name)


@st.cache_resource
def get_hugging_face_model():
    """Load the CodeBERT-based embedding model once and cache it across reruns."""
    model_name = "mchochlov/codebert-base-cd-ft"
    return HuggingFaceEmbeddings(model_name=model_name)


@st.cache_resource
def get_db():
    """Read a pickled vector store from disk, remapping any GPU tensors to CPU."""
    with open("codesearchdb.pickle", "rb") as f:
        return CPU_Unpickler(f).load()
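# Note: get_db() is an alternative load path; the app below loads its index with
# FAISS.load_local instead.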


def get_similar_links(query, db, embeddings):
    """Embed the query, find the nearest stored documents, and collect their links."""
    embedding_vector = embeddings.embed_query(query)
    docs = db.similarity_search_by_vector(embedding_vector)
    links = []
    for doc in docs:
        # Each stored document is an HTML snippet; pull every href out of it.
        soup = BeautifulSoup(doc.page_content, 'html.parser')
        links.extend(a['href'] for a in soup.find_all('a', href=True))
    return links


embeddings = get_hugging_face_model()
db = FAISS.load_local("code_sim_index", embeddings)
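# Assumption: "code_sim_index" is a FAISS index built offline from the LeetCode
# solution pages, roughly along the lines of (solution_docs is hypothetical):
#     db = FAISS.from_documents(solution_docs, get_hugging_face_model())
#     db.save_local("code_sim_index")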

st.title("📒 DSASearch Engine 🤖 ")
text_input = st.text_area("Enter a Code Example", value=
"""
class Solution:
    def subsets(self, nums: List[int]) -> List[List[int]]:
        outputs = []
        def backtrack(k, index, subSet):
            if index == k:
                outputs.append(subSet[:])
                return
            for i in range(index, len(nums)):
                backtrack(k, i + 1, subSet + [nums[i]])
        for j in range(len(nums) + 1):
            backtrack(j, 0, [])
        return outputs
""", height = 330
)
button = st.button("Find Similar Questions on Leetcode")
if button:
    query = text_input
    answer = get_similar_links(query, db, embeddings)
    for link in set(answer):
        st.write(link)
    st.balloons()
else:
    st.info("Please input a valid code example.")

st.markdown("""
        ### Created by Ashwin Rachha.
    """)
st.markdown("Source Data : https://github.com/AshwinRachha/LeetCode-Solutions")
st.markdown("Medium Blog : https://medium.com/@ashwin_rachha/querying-a-code-database-to-find-similar-coding-problems-using-langchain-814730da6e6d")