import os

from huggingface_hub import InferenceClient

# Inference client for the feifei-flux-lora-v1.1 text-to-image model;
# the access token is read from the HF_TOKEN environment variable.
client = InferenceClient(
    "aifeifei798/feifei-flux-lora-v1.1",
    token=os.getenv("HF_TOKEN"),
)
# Disable server-side response caching so repeated prompts return fresh images.
client.headers["x-use-cache"] = "0"


def feifeifluxapi(prompt, height=1152, width=896, guidance_scale=3.5):
    """Generate an image from a text prompt; returns a PIL.Image object."""
    # Collapse newlines so the prompt is sent as a single line of text.
    prompt = prompt.replace("\n", " ")
    result = client.text_to_image(
        prompt=prompt,
        width=width,
        height=height,
        guidance_scale=guidance_scale,
    )
    return result
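

# Minimal usage sketch (an assumption, not part of the original snippet):
# the prompt text and output filename below are illustrative placeholders.
if __name__ == "__main__":
    image = feifeifluxapi(
        "portrait of a woman in a flower garden, soft natural light"
    )
    # The result is a PIL.Image, so it can be saved to any common format.
    image.save("output.png")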