from pprint import pprint
import argparse
from itertools import chain, tee
from argparse import Namespace

def get_default_args():
    """Returns the default arguments as a Namespace object."""
    default_arg_dict = {
        'run_gradio': True,
        'run_extended': True,
        'demo_public': False,
        # Alternatives: 'google/gemma-2-2b-it', 'meta-llama/Meta-Llama-3-8B'
        'model_name_or_path': 'bigscience/bloom-560m',
        'load_fp16': False,
        'prompt_max_length': None,
        'max_new_tokens': 200,
        'generation_seed': 123,
        'use_sampling': True,
        'n_beams': 1,
        'sampling_temp': 0.7,
        'use_gpu': False,
        'seeding_scheme': 'simple_1',
        'gamma': 0.25,
        'delta': 2.0,
        'normalizers': '',
        'skip_repeated_bigrams': False,
        'ignore_repeated_ngrams': False,
        'detection_z_threshold': 4.0,
        'select_green_tokens': True,
        'skip_model_load': False,
        'seed_separately': True,
    }
    args = Namespace()
    args.__dict__.update(default_arg_dict)
    return args

def process_args(args):
    """Process and normalize command-line arguments."""
    args.normalizers = args.normalizers.split(",") if args.normalizers else []
    print(args)
    return args

def get_default_prompt():
    """Return the default input text for generation."""
    return (
        "Manchester is a major city in the northwest of England with a rich industrial heritage. "
        "The city played a central role in the Industrial Revolution and is known for its influence "
        "on industry, music, and culture.[1] One of Manchester's most iconic landmarks is the "
        "Manchester Town Hall, a stunning example of Victorian Gothic architecture.[2] The city is "
        "also home to the University of Manchester, one of the UK's leading research institutions, "
        "and the Manchester Museum, which houses extensive collections in the fields of archaeology, "
        "anthropology, and natural history.[3] Manchester has a vibrant cultural scene, having "
        "produced several influential bands, including The Smiths, Joy Division, and Oasis.[4] "
        "The Northern Quarter is known for its independent shops, cafes, bars, street art, and "
        "music venues.[5]"
    )

def display_prompt(prompt, term_width=80):
    """Display the prompt text."""
    print("#" * term_width)
    print("Prompt:")
    print(prompt)

def display_results(output, detection_result, args, term_width=80, with_watermark=True):
    """Display the generated output and detection results."""
    watermark_status = "with watermark" if with_watermark else "without watermark"
    print("#" * term_width)
    print(f"Output {watermark_status}:")
    print(output)
    print("-" * term_width)
    print(f"Detection result @ {args.detection_z_threshold}:")
    pprint(detection_result)
    print("-" * term_width)

def str2bool(v):
    """Util function for user-friendly boolean flag args."""
    if isinstance(v, bool):
        return v
    if v.lower() in ('yes', 'true', 't', 'y', '1'):
        return True
    elif v.lower() in ('no', 'false', 'f', 'n', '0'):
        return False
    else:
        raise argparse.ArgumentTypeError('Boolean value expected.')
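# Usage sketch (illustrative only): the Namespace from `get_default_args`
# mirrors the CLI flags defined in `parse_args` below, and `process_args`
# splits the comma-separated `normalizers` string into a list. The normalizer
# names here are placeholders, not a statement of what the detector supports.
#
#     >>> args = get_default_args()
#     >>> args.normalizers = "unicode,homoglyphs"
#     >>> args = process_args(args)  # also prints the full namespace
#     >>> args.normalizers
#     ['unicode', 'homoglyphs']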
def parse_args():
    """Command line argument specification."""
    parser = argparse.ArgumentParser(
        description="A minimal working example of applying the watermark to any LLM that supports the huggingface 🤗 `generate` API"
    )
    parser.add_argument(
        "--run_gradio",
        type=str2bool,
        default=True,
        help="Whether to launch as a gradio demo. Set to False if gradio is not installed and you want to just run the stdout version.",
    )
    parser.add_argument(
        "--demo_public",
        type=str2bool,
        default=False,
        help="Whether to expose the gradio demo to the internet.",
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bigscience/bloom-560m",
        help="Main model, path to pretrained model or model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "--prompt_max_length",
        type=int,
        default=None,
        help="Truncation length for prompt, overrides model config's max length field.",
    )
    parser.add_argument(
        "--max_new_tokens",
        type=int,
        default=200,
        help="Maximum number of new tokens to generate.",
    )
    parser.add_argument(
        "--generation_seed",
        type=int,
        default=123,
        help="Seed for setting the torch global rng prior to generation.",
    )
    parser.add_argument(
        "--use_sampling",
        type=str2bool,
        default=True,
        help="Whether to generate using multinomial sampling.",
    )
    parser.add_argument(
        "--sampling_temp",
        type=float,
        default=0.7,
        help="Sampling temperature to use when generating using multinomial sampling.",
    )
    parser.add_argument(
        "--n_beams",
        type=int,
        default=1,
        help="Number of beams to use for beam search. 1 is normal greedy decoding.",
    )
    parser.add_argument(
        "--use_gpu",
        type=str2bool,
        default=True,
        help="Whether to run inference and watermark hashing/seeding/permutation on gpu.",
    )
    parser.add_argument(
        "--seeding_scheme",
        type=str,
        default="simple_1",
        help="Seeding scheme to use to generate the greenlists at each generation and verification step.",
    )
    parser.add_argument(
        "--gamma",
        type=float,
        default=0.25,
        help="The fraction of the vocabulary to partition into the greenlist at each generation and verification step.",
    )
    parser.add_argument(
        "--delta",
        type=float,
        default=2.0,
        help="The amount/bias to add to each of the greenlist token logits before each token sampling step.",
    )
    parser.add_argument(
        "--normalizers",
        type=str,
        default="",
        help="Single or comma-separated list of the preprocessor/normalizer names to use when performing watermark detection.",
    )
    parser.add_argument(
        "--skip_repeated_bigrams",
        type=str2bool,
        default=False,
        help="Whether to use the detection method that only counts each unique bigram once as either a green or red hit.",
    )
    parser.add_argument(
        "--detection_z_threshold",
        type=float,
        default=4.0,
        help="The test statistic threshold for the detection hypothesis test.",
    )
    parser.add_argument(
        "--select_green_tokens",
        type=str2bool,
        default=True,
        help="How to treat the permutation when selecting the greenlist tokens at each step. Legacy behavior (False) picks the complement/reds first.",
    )
    parser.add_argument(
        "--skip_model_load",
        type=str2bool,
        default=False,
        help="Skip the model loading to debug the interface.",
    )
    parser.add_argument(
        "--ignore_repeated_ngrams",
        type=str2bool,
        default=False,
        help="Whether to ignore repeated ngrams during detection.",
    )
    parser.add_argument(
        "--seed_separately",
        type=str2bool,
        default=True,
        help="Whether to call the torch seed function before both the unwatermarked and watermarked generate calls.",
    )
    parser.add_argument(
        "--load_fp16",
        type=str2bool,
        default=False,
        help="Whether to run the model in float16 precision.",
    )
    parser.add_argument(
        "--run_extended",
        type=str2bool,
        default=False,
        help="Whether to run the basic or the extended algorithm.",
    )
    args = parser.parse_args()
    return args
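# Illustrative sketch, not the watermark processor this demo actually uses:
# per the `--gamma`/`--delta` help text above, generation partitions the
# vocabulary into a pseudo-randomly seeded "greenlist" of roughly
# gamma * vocab_size token ids, then adds `delta` to those logits before each
# sampling step. The helper below is a hypothetical, dependency-free version
# of just the biasing step.
def _example_bias_greenlist_logits(logits, greenlist_ids, delta):
    """Add `delta` to the logits at each greenlist position (hypothetical helper)."""
    for token_id in greenlist_ids:
        logits[token_id] += delta
    return logits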
def format_names(s):
    """Format metric names for the gradio demo interface."""
    s = s.replace("num_tokens_scored", "Tokens Counted (T)")
    s = s.replace("num_green_tokens", "# Tokens in Greenlist")
    s = s.replace("green_fraction", "Fraction of T in Greenlist")
    s = s.replace("z_score", "z-score")
    s = s.replace("p_value", "p value")
    s = s.replace("prediction", "Prediction")
    s = s.replace("confidence", "Confidence")
    return s

def list_format_scores(score_dict, detection_threshold):
    """Format the detection metrics into a gradio dataframe input format."""
    lst_2d = []
    # lst_2d.append(["z-score threshold", f"{detection_threshold}"])
    for k, v in score_dict.items():
        if k == 'green_fraction':
            lst_2d.append([format_names(k), f"{v:.1%}"])
        elif k == 'confidence':
            lst_2d.append([format_names(k), f"{v:.3%}"])
        elif isinstance(v, float):
            lst_2d.append([format_names(k), f"{v:.3g}"])
        elif isinstance(v, bool):
            lst_2d.append([format_names(k), ("Watermarked" if v else "Human/Unwatermarked")])
        else:
            lst_2d.append([format_names(k), f"{v}"])
    if "confidence" in score_dict:
        lst_2d.insert(-2, ["z-score Threshold", f"{detection_threshold}"])
    else:
        lst_2d.insert(-1, ["z-score Threshold", f"{detection_threshold}"])
    return lst_2d

##########################################################################
# Ngram iteration from nltk, extracted to remove the dependency
# Natural Language Toolkit: Utility functions
#
# Copyright (C) 2001-2023 NLTK Project
# Author: Steven Bird
#         Eric Kafe (acyclic closures)
# URL: https://www.nltk.org/
# For license information, see https://github.com/nltk/nltk/blob/develop/LICENSE.txt
##########################################################################

def ngrams(sequence, n, pad_left=False, pad_right=False, pad_symbol=None):
    """Return the ngrams of length n generated from a sequence, as an iterator."""
    sequence = iter(sequence)
    if pad_left:
        sequence = chain((pad_symbol,) * (n - 1), sequence)
    if pad_right:
        sequence = chain(sequence, (pad_symbol,) * (n - 1))
    iterables = tee(sequence, n)
    for i, sub_iterable in enumerate(iterables):  # For each window,
        for _ in range(i):                        # iterate through every order of ngrams
            next(sub_iterable, None)              # generate the ngrams within the window.
    return zip(*iterables)  # Unpack and flatten the iterables.
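# Example of the sliding windows `ngrams` produces; the bigram case is what
# the `--skip_repeated_bigrams` detection option iterates over.
#
#     >>> list(ngrams([1, 2, 3, 4], 2))
#     [(1, 2), (2, 3), (3, 4)]
#     >>> list(ngrams("abc", 2, pad_right=True, pad_symbol="_"))
#     [('a', 'b'), ('b', 'c'), ('c', '_')]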