# mlx-community/jinaai-ReaderLM-v2

This model (mlx-community/jinaai-ReaderLM-v2) was converted to MLX format from jinaai/ReaderLM-v2.

## Use with mlx

```bash
pip install mlx-lm
```

```python
from mlx_lm import load, generate

model, tokenizer = load("mlx-community/jinaai-ReaderLM-v2")

prompt = "hello"

# Apply the chat template when the tokenizer provides one
if hasattr(tokenizer, "apply_chat_template") and tokenizer.chat_template is not None:
    messages = [{"role": "user", "content": prompt}]
    prompt = tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )

response = generate(model, tokenizer, prompt=prompt, verbose=True)
```
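
The prompt above is only a placeholder; ReaderLM-v2 is an HTML-to-Markdown model, so a more representative call wraps an HTML document in the chat template. A minimal sketch using the same load/generate API as above (the instruction wording and the sample HTML are illustrative assumptions, not a required format):

```python
from mlx_lm import load, generate

model, tokenizer = load("mlx-community/jinaai-ReaderLM-v2")

# Illustrative HTML input; ReaderLM-v2 converts raw HTML into Markdown.
html = (
    "<!DOCTYPE html><html><body>"
    "<h1>Html Heading One</h1><p>first paragraph.</p>"
    "</body></html>"
)

# Assumed instruction wording for illustration; adjust to your use case.
instruction = "Extract the main content from the given HTML and convert it to Markdown format."

messages = [{"role": "user", "content": f"{instruction}\n\n{html}"}]
prompt = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)

markdown = generate(model, tokenizer, prompt=prompt, verbose=True)
```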

## Use with MLX Model Manager

Add the package dependency in Xcode: https://github.com/kunal732/MLX-Model-Manager
```swift
import SwiftUI
import MLXModelManager

struct ContentView: View {

    @StateObject var JinaManager = ModelManager(modelPath: "mlx-community/jinaai-ReaderLM-v2")

    var body: some View {
        VStack {
            Button("answer prompt") {
                Task {
                    // Load the model
                    try await JinaManager.loadModel()

                    // Run inference on an HTML-to-Markdown prompt
                    await JinaManager.generate(
                        prompt: "convert to markdown: <!DOCTYPE html><html><body><h1>Html Heading One</h1><p>first paragraph.</p></body></html>"
                    )
                }
            }

            // Model output
            Text(JinaManager.output)
        }
        .padding()
    }
}
```