/* * $Id: w_display.c,v 1.25 2008-07-27 03:18:38 haley Exp $ */ /************************************************************************ * * * Copyright (C) 2000 * * University Corporation for Atmospheric Research * * All Rights Reserved * * * * The use of this Software is governed by a License Agreement. * * * ************************************************************************/ /* * w_display.c * * Author John Clyne * * Date Mon Sep 17 14:02:33 MDT 1990 * * This file contains widget code necessary to dynamically create the * 'display' panels for idt. A maximum of MAX_DISPLAYS may be * created simultaneously. */ #include <stdio.h> #include <stdlib.h> #include <X11/Intrinsic.h> #include <X11/StringDefs.h> #include <X11/Shell.h> #include <X11/Xaw/Dialog.h> #include <X11/Xaw/Paned.h> #include <X11/Xaw/AsciiText.h> #include <X11/Xaw/Command.h> #include <X11/Xaw/Toggle.h> #include <X11/Xaw/Form.h> #include <X11/Xaw/Scrollbar.h> #include <X11/Xaw/MenuButton.h> #include <X11/Xaw/SimpleMenu.h> #include <X11/Xaw/Sme.h> #include <X11/Xaw/SmeBSB.h> #include <ncarg/c.h> #include "idt.h" #include "w_dialog.h" #include "display.h" #include "talkto.h" #include "bits.h" #include "commands.h" #include "scroll.h" #include "xrubber.h" extern void Animate( #ifdef NeedFuncProto Widget widget, XtPointer client_data, XtPointer call_data #endif ); /* * simple_command * [internal] * * on entry * id : connection of translator * command : command this box represents */ static void simple_command(wd, format, command) WidgetData *wd; char *format; DisplayCommands command; { Command(wd->id, format, NULL); } static void update_frame_by_num(data, num) Voidptr *data; int num; { WidgetData *wd = (WidgetData *) data; char frame_string[10]; sprintf(frame_string, "%d", num); UpdateFrameLabel(wd, frame_string); } static void update_frame_by_num_cb(data, num) Voidptr *data; int num; { update_frame_by_num(data, num + 1); } /* * The Callbacks */ /*ARGSUSED*/ static void Playback(widget, client_data, call_data) Widget widget; XtPointer client_data, /* display id */ call_data; /* not used */ { WidgetData *wd = (WidgetData *) client_data; wd->do_play = True; if (wd->do_animate) { Arg args[2]; XtSetArg(args[0], XtNsensitive, False); XtSetValues(wd->animate, args, 1); XtSetValues(wd->done, args, 1); #ifdef DEAD AnimateDispContReverse(wd->a, (int (*)()) NULL, (Voidptr) NULL); #endif AnimateDispContReverse( wd->a, update_frame_by_num_cb, (Voidptr) wd ); XtSetArg(args[0], XtNsensitive, True); XtSetValues(wd->done, args, 1); XtSetArg(args[1], XtNstate, True); XtSetValues(wd->animate, args, 2); } else { simple_command(wd, PLAYBACK_STRING, PLAYBACK); UpdateFrameLabel(wd, ""); } } /*ARGSUSED*/ static void Jogback(widget, client_data, call_data) Widget widget; XtPointer client_data, /* display id */ call_data; /* not used */ { WidgetData *wd = (WidgetData *) client_data; if (wd->do_animate) { (void) AnimateDisplayPrev(wd->a); update_frame_by_num((Voidptr) wd, AnimateGetImageNum(wd->a)+1); } else { simple_command(wd, JOGBACK_STRING, JOGBACK); UpdateFrameLabel(wd, ""); } } /*ARGSUSED*/ static void Stop(widget, client_data, call_data) Widget widget; XtPointer client_data, /* display id */ call_data; /* not used */ { WidgetData *wd = (WidgetData *) client_data; if (wd->do_animate) { #ifdef DEAD wd->do_play = False; #endif AnimateStop(wd->a); } else { if (wd->do_play) { SignalTo(wd->id, STOP_SIGNAL); wd->do_play = False; } else { simple_command(wd, REDRAW_STRING, REDRAW); } } } /*ARGSUSED*/ static void Jog(widget, client_data, call_data) 
Widget widget; XtPointer client_data, /* display id */ call_data; /* not used */ { WidgetData *wd = (WidgetData *) client_data; if (wd->do_animate) { (void) AnimateDisplayNext(wd->a); update_frame_by_num((Voidptr) wd, AnimateGetImageNum(wd->a)+1); } else { simple_command(wd, JOG_STRING, JOG); UpdateFrameLabel(wd, ""); } } /*ARGSUSED*/ static void Play(widget, client_data, call_data) Widget widget; XtPointer client_data, /* display id */ call_data; /* not used */ { WidgetData *wd = (WidgetData *) client_data; wd->do_play = True; if (wd->do_animate) { Arg args[2]; XtSetArg(args[0], XtNsensitive, False); XtSetValues(wd->animate, args, 1); XtSetValues(wd->done, args, 1); #ifdef DEAD AnimateDispContForward(wd->a, (int (*)()) NULL, (Voidptr) NULL); #endif AnimateDispContForward( wd->a, update_frame_by_num_cb, (Voidptr) wd ); XtSetArg(args[0], XtNsensitive, True); XtSetValues(wd->done, args, 1); XtSetArg(args[1], XtNstate, True); XtSetValues(wd->animate, args, 2); } else { simple_command(wd, PLAY_STRING, PLAY); UpdateFrameLabel(wd, ""); } } /*ARGSUSED*/ static void Loop(widget, client_data, call_data) Widget widget; XtPointer client_data, /* display id */ call_data; /* not used */ { WidgetData *wd = (WidgetData *) client_data; if (wd->do_animate) { AnimateLoop(wd->a); } else { simple_command(wd, LOOP_STRING, LOOP); } } void DupSelect(data, value) Voidptr data; char *value; { WidgetData *wd = (WidgetData *) data; Command(wd->id, DUP_STRING, value); wd->pcv.dup = atoi(value); } /*ARGSUSED*/ static void Dup(widget, client_data, call_data) Widget widget; XtPointer client_data, /* display id */ call_data; /* not used */ { WidgetData *wd = (WidgetData *) client_data; char buf[MAX_DATA_LEN]; sprintf(buf, "%d", wd->pcv.dup); CreateSimpleDialogPopup(widget, "dup:", DupSelect, (Voidptr) wd, buf); } /*ARGSUSED*/ static void Scroll(widget, client_data, call_data) Widget widget; XtPointer client_data, /* display id */ call_data; /* percent scrolled */ { WidgetData *wd = (WidgetData *) client_data; float percent = *(float *) call_data; ScrollTo(wd, percent); } void GotoSelect(data, value) Voidptr data; char *value; { WidgetData *wd = (WidgetData *) data; if (wd->do_animate) { int frame = atoi(value); AnimateDisplayImage(wd->a, frame-1); update_frame_by_num((Voidptr) wd, AnimateGetImageNum(wd->a)+1); } else { Command(wd->id, GOTO_STRING, value); } wd->pcv.goto_ = atoi(value); } /*ARGSUSED*/ static void Goto_(widget, client_data, call_data) Widget widget; XtPointer client_data, /* display id */ call_data; /* not used */ { WidgetData *wd = (WidgetData *) client_data; char buf[MAX_DATA_LEN]; sprintf(buf, "%d", wd->pcv.goto_); CreateSimpleDialogPopup(widget, "goto:", GotoSelect, (Voidptr) wd, buf); } void SkipSelect(data, value) Voidptr data; char *value; { WidgetData *wd = (WidgetData *) data; Command(wd->id, SKIP_STRING, value); wd->pcv.skip = atoi(value); } /*ARGSUSED*/ static void Skip(widget, client_data, call_data) Widget widget; XtPointer client_data, /* display id */ call_data; /* not used */ { WidgetData *wd = (WidgetData *) client_data; char buf[MAX_DATA_LEN]; sprintf(buf, "%d", wd->pcv.skip); CreateSimpleDialogPopup(widget, "skip:", SkipSelect, (Voidptr) wd, buf); } void DelaySelect(data, value) Voidptr data; char *value; { WidgetData *wd = (WidgetData *) data; wd->pcv.delay = atoi(value); AnimateSetDelay(wd->a, wd->pcv.delay); } /*ARGSUSED*/ static void Delay(widget, client_data, call_data) Widget widget; XtPointer client_data, /* display id */ call_data; /* not used */ { WidgetData *wd = (WidgetData *) 
client_data; char buf[MAX_DATA_LEN]; sprintf(buf, "%d", wd->pcv.delay); CreateSimpleDialogPopup( widget, "delay in 1/100 sec:", DelaySelect, (Voidptr) wd, buf ); } void StartSegmentSelect(data, value) Voidptr data; char *value; { WidgetData *wd = (WidgetData *) data; Command(wd->id, START_SEGMENT_STRING, value); wd->pcv.start_segment = atoi(value); } /*ARGSUSED*/ static void Start_Segment(widget, client_data, call_data) Widget widget; XtPointer client_data, /* display id */ call_data; /* not used */ { WidgetData *wd = (WidgetData *) client_data; char buf[MAX_DATA_LEN]; sprintf(buf, "%d", wd->pcv.start_segment); CreateSimpleDialogPopup( widget, "start segment:", StartSegmentSelect, (Voidptr) wd, buf ); } void StopSegmentSelect(data, value) Voidptr data; char *value; { WidgetData *wd = (WidgetData *) data; Command(wd->id, STOP_SEGMENT_STRING, value); wd->pcv.stop_segment = atoi(value); } /*ARGSUSED*/ static void Stop_Segment(widget, client_data, call_data) Widget widget; XtPointer client_data, /* display id */ call_data; /* not used */ { WidgetData *wd = (WidgetData *) client_data; char buf[MAX_DATA_LEN]; sprintf(buf, "%d", wd->pcv.stop_segment); CreateSimpleDialogPopup( widget, "stop segment:", StopSegmentSelect, (Voidptr) wd, buf ); } void SetWindowSelect(data, value) Voidptr data; char *value; { WidgetData *wd = (WidgetData *) data; Command(wd->id, SET_WINDOW_STRING, value); strncpy(wd->pcv.set_window, value, sizeof(wd->pcv.set_window)); /* * SetWindow should really update wd->ar, wd->llx, etc in the * same manner as Zoom() does. Some day... */ } /*ARGSUSED*/ static void Set_Window(widget, client_data, call_data) Widget widget; XtPointer client_data, /* display id */ call_data; /* not used */ { WidgetData *wd = (WidgetData *) client_data; CreateSimpleDialogPopup( widget, "set device window coordinates:", SetWindowSelect, (Voidptr) wd, wd->pcv.set_window ); } /* * */ /*ARGSUSED*/ static void Done(widget, client_data, call_data) Widget widget; XtPointer client_data, /* display id */ call_data; /* not used */ { WidgetData *wd = (WidgetData *) client_data; if (wd->do_animate) { /* * turn animation off by calling animation callback. 
* The animation button is a toggle */ Animate(widget, (XtPointer) wd, (XtPointer) NULL); } simple_command(wd, DONE_STRING, DONE); CloseDisplay(wd->id); XtDestroyWidget(wd->popup); } /* * */ /*ARGSUSED*/ static void CurrentFrame(widget, client_data, call_data) Widget widget; XtPointer client_data, /* display id */ call_data; /* not used */ { WidgetData *wd = (WidgetData *) client_data; char *current; current = TalkTo(wd->id, "current\n", SYNC); wd->pcv.current_frame = atoi(current); UpdateFrameLabel(wd, current); } /*ARGSUSED*/ static void PrintSelect(widget, client_data, call_data) Widget widget; XtPointer client_data, /* display id */ call_data; /* not used */ { WidgetData *wd = (WidgetData *) client_data; char *spooler = XtName(widget); Command(wd->id, PRINT_STRING, spooler); } /* * */ void SaveSelect(data, value) Voidptr data; char *value; { WidgetData *wd = (WidgetData *) data; Command(wd->id, SAVE_STRING, value); strncpy(wd->pcv.save, value, sizeof(wd->pcv.save)); } /*ARGSUSED*/ static void Save(widget, client_data, call_data) Widget widget; XtPointer client_data, /* display id */ call_data; /* not used */ { WidgetData *wd = (WidgetData *) client_data; CreateSimpleDialogPopup( widget, "Please enter file name:", SaveSelect, (Voidptr) wd, wd->pcv.save ); } /* * */ /*ARGSUSED*/ static void Zoom(widget, client_data, call_data) Widget widget; XtPointer client_data, /* display id */ call_data; /* not used */ { WidgetData *wd = (WidgetData *) client_data; Window root; float llx, lly, urx, ury; /* coords of rubber band */ float new_llx, new_lly, /* zoom coords */ new_urx, new_ury; float ax, bx, ay, by; char buf[80]; root = RootWindowOfScreen(XtScreen(widget)); /* * get the new mapping */ (void) TalkTo(wd->id, NOOP_STRING, SYNC); if (ZoomCoords( XtDisplay(widget), root, wd->ar, &llx,&lly,&urx,&ury) == NULL){ (void) fprintf(stderr, "Zoom failed\n"); return; } /* * map the new mapping into the old */ bx = wd->llx; ax = wd->urx - bx; by = wd->lly; ay = wd->ury - by; new_llx = (ax * llx) + bx; new_lly = (ay * lly) + by; new_urx = (ax * urx) + bx; new_ury = (ay * ury) + by; /* * remember the new coords and aspect ratio */ wd->ar = (new_urx - new_llx) / (new_ury - new_lly); wd->llx = new_llx; wd->lly = new_lly; wd->urx = new_urx; wd->ury = new_ury; sprintf(buf, "%6.4f %6.4f %6.4f %6.4f",new_llx,new_lly,new_urx,new_ury); Command(wd->id, ZOOM_STRING, buf); /* * remember the zoom coordinates as if the SET_WINDOW command * was called */ strcpy(wd->pcv.set_window, buf); } /*ARGSUSED*/ static void UnZoom(widget, client_data, call_data) Widget widget; XtPointer client_data, /* display id */ call_data; /* not used */ { WidgetData *wd = (WidgetData *) client_data; wd->ar = 1.0; wd->llx = wd->lly = 0.0; wd->urx = wd->ury = 1.0; simple_command(wd, UNZOOM_STRING, UNZOOM); } UpdateFrameLabel(wd, frame_string) WidgetData *wd; char *frame_string; { Arg args[10]; char buf[80]; sprintf(buf, "%s %s", FRAME_LABEL_DISPLAY, frame_string); XtSetArg(args[0], XtNlabel, buf); XtSetValues(wd->frame_label, args, 1); } void create_tip_top_panel(paned, wd) Widget paned; WidgetData *wd; { Widget form; Cardinal n; Arg args[10]; n = 0; XtSetArg(args[n], XtNskipAdjust, True); n++; form = XtCreateManagedWidget("form", formWidgetClass,paned, args,n); n = 0; wd->scrollbar = XtCreateManagedWidget("scrollbar", scrollbarWidgetClass,form, (ArgList) args,n); XtAddCallback(wd->scrollbar, XtNjumpProc, Scroll, (XtPointer) wd); n = 0; XtSetArg(args[n], XtNfromHoriz, wd->scrollbar); n++; XtSetArg(args[n], XtNborderColor, XtDefaultBackground); 
n++; wd->frame_label = XtCreateManagedWidget(FRAME_LABEL_DISPLAY, labelWidgetClass,form,args,n); } /* * The top panel consists of the scrollbar; "playback", "jogback", "stop", * "jog", and "play" buttons. */ void create_top_panel(paned, wd) Widget paned; WidgetData *wd; { Widget form; Pixmap pixmap; Cardinal n; Arg args[10]; n = 0; XtSetArg(args[n], XtNskipAdjust, True); n++; form = XtCreateManagedWidget("form", formWidgetClass,paned, args,n); /* * create a pixmap for the playback button */ pixmap = XCreateBitmapFromData( XtDisplay(paned), RootWindowOfScreen(XtScreen(paned)), (const char *) playback_bits, playback_width,playback_height ); n = 0; XtSetArg(args[n], XtNbitmap, pixmap); n++; wd->playback = XtCreateManagedWidget("playback", commandWidgetClass,form,args,n); XtAddCallback(wd->playback, XtNcallback, Playback, (XtPointer) wd); /* * the jogback button */ pixmap = XCreateBitmapFromData( XtDisplay(paned), RootWindowOfScreen(XtScreen(paned)), (const char *) jogback_bits,jogback_width,jogback_height ); n = 0; XtSetArg(args[n], XtNbitmap, pixmap); n++; XtSetArg(args[n], XtNfromHoriz, wd->playback); n++; wd->jogback = XtCreateManagedWidget("jogback", commandWidgetClass,form,args,n); XtAddCallback(wd->jogback, XtNcallback, Jogback, (XtPointer) wd); /* * the stop button */ pixmap = XCreateBitmapFromData( XtDisplay(paned), RootWindowOfScreen(XtScreen(paned)), (const char *) stop_bits,stop_width,stop_height ); n = 0; XtSetArg(args[n], XtNbitmap, pixmap); n++; XtSetArg(args[n], XtNfromHoriz, wd->jogback); n++; wd->stop = XtCreateManagedWidget("stop",commandWidgetClass,form,args,n); XtAddCallback(wd->stop, XtNcallback, Stop, (XtPointer) wd); /* * the jog button */ pixmap = XCreateBitmapFromData( XtDisplay(paned), RootWindowOfScreen(XtScreen(paned)), (const char *) jog_bits,jog_width,jog_height ); n = 0; XtSetArg(args[n], XtNbitmap, pixmap); n++; XtSetArg(args[n], XtNfromHoriz, wd->stop); n++; wd->jog = XtCreateManagedWidget("jog",commandWidgetClass,form,args,n); XtAddCallback(wd->jog, XtNcallback, Jog, (XtPointer) wd); /* * the play button */ pixmap = XCreateBitmapFromData( XtDisplay(paned), RootWindowOfScreen(XtScreen(paned)), (const char *) play_bits,play_width,play_height ); n = 0; XtSetArg(args[n], XtNbitmap, pixmap); n++; XtSetArg(args[n], XtNfromHoriz, wd->jog); n++; wd->play = XtCreateManagedWidget( "play", commandWidgetClass,form,args,n ); XtAddCallback(wd->play, XtNcallback, Play, (XtPointer) wd); } /* * The middle panel consists of the "loop", "goto", "dup", "skip", * "start segment" and the "stop segment" buttons. 
*/ static void create_middle_panel(paned, wd) Widget paned; WidgetData *wd; { Widget form; Cardinal n; Arg args[10]; n = 0; XtSetArg(args[n], XtNskipAdjust, True); n++; form = XtCreateManagedWidget("form", formWidgetClass,paned, args,n); n = 0; wd->loop = XtCreateManagedWidget("loop", toggleWidgetClass,form,args,n); XtAddCallback(wd->loop, XtNcallback, Loop, (XtPointer) wd); n = 0; XtSetArg(args[n], XtNfromHoriz, wd->loop); n++; wd->dup = XtCreateManagedWidget("dup",commandWidgetClass,form,args,n); XtAddCallback(wd->dup, XtNcallback, Dup, (XtPointer) wd); wd->pcv.dup = DEFAULT_DUP; n = 0; XtSetArg(args[n], XtNfromHoriz, wd->dup); n++; wd->goto_ = XtCreateManagedWidget( "goto",commandWidgetClass,form,args,n ); XtAddCallback(wd->goto_, XtNcallback, Goto_, (XtPointer) wd); wd->pcv.goto_ = DEFAULT_GOTO; n = 0; XtSetArg(args[n], XtNfromHoriz, wd->goto_); n++; wd->skip = XtCreateManagedWidget("skip",commandWidgetClass,form,args,n); XtAddCallback(wd->skip, XtNcallback, Skip, (XtPointer) wd); wd->pcv.skip = DEFAULT_SKIP; n = 0; XtSetArg(args[n], XtNfromHoriz, wd->skip); n++; XtSetArg(args[n], XtNsensitive, False); n++; wd->delay = XtCreateManagedWidget( "delay",commandWidgetClass,form,args,n ); XtAddCallback(wd->delay, XtNcallback, Delay, (XtPointer) wd); wd->pcv.delay = DEFAULT_DELAY; n = 0; XtSetArg(args[n], XtNfromHoriz, wd->delay); n++; wd->start_segment = XtCreateManagedWidget("start segment", commandWidgetClass,form,args,n); XtAddCallback( wd->start_segment, XtNcallback, Start_Segment,(XtPointer) wd ); wd->pcv.start_segment = DEFAULT_START; n = 0; XtSetArg(args[n], XtNfromHoriz, wd->start_segment); n++; wd->stop_segment = XtCreateManagedWidget("stop segment", commandWidgetClass,form,args,n); XtAddCallback(wd->stop_segment,XtNcallback,Stop_Segment,(XtPointer) wd); wd->pcv.stop_segment = DEFAULT_STOP; n = 0; XtSetArg(args[n], XtNfromHoriz, wd->stop_segment); n++; wd->set_window = XtCreateManagedWidget("set window", commandWidgetClass,form,args,n); XtAddCallback(wd->set_window,XtNcallback,Set_Window,(XtPointer) wd); strcpy(wd->pcv.set_window, DEFAULT_SET_WINDOW); } static Widget create_bottom_panel(paned, wd) Widget paned; WidgetData *wd; { Arg args[10]; Widget form; Cardinal n; n = 0; XtSetArg(args[n], XtNskipAdjust, True); n++; form = XtCreateManagedWidget("form", formWidgetClass,paned, args,n); n = 0; wd->done = XtCreateManagedWidget("done",commandWidgetClass,form,args,n); XtAddCallback(wd->done, XtNcallback, Done, (XtPointer) wd); n = 0; XtSetArg(args[n], XtNfromHoriz, wd->done); n++; wd->current_frame = XtCreateManagedWidget("current frame", commandWidgetClass,form,args,n); XtAddCallback(wd->current_frame,XtNcallback,CurrentFrame,(XtPointer)wd); wd->pcv.current_frame = DEFAULT_CURRENT; n = 0; XtSetArg(args[n], XtNfromHoriz, wd->current_frame); n++; wd->print = XtCreateManagedWidget("print",menuButtonWidgetClass, form,args,n); n = 0; XtSetArg(args[n], XtNfromHoriz, wd->print); n++; wd->save = XtCreateManagedWidget("save",commandWidgetClass,form,args,n); XtAddCallback(wd->save, XtNcallback, Save, (XtPointer) wd); strcpy(wd->pcv.save, DEFAULT_SAVE); n = 0; XtSetArg(args[n], XtNfromHoriz, wd->save); n++; wd->zoom = XtCreateManagedWidget("zoom",commandWidgetClass,form,args,n); XtAddCallback(wd->zoom, XtNcallback, Zoom, (XtPointer) wd); wd->ar = 1.0; wd->llx = wd->lly = 0.0; wd->urx = wd->ury = 1.0; n = 0; XtSetArg(args[n], XtNfromHoriz, wd->zoom); n++; wd->unzoom = XtCreateManagedWidget( "unzoom",commandWidgetClass,form,args,n ); XtAddCallback(wd->unzoom, XtNcallback, UnZoom, (XtPointer) wd); n 
= 0; XtSetArg(args[n], XtNfromHoriz, wd->unzoom); n++; wd->animate = XtCreateManagedWidget( "animate",toggleWidgetClass,form,args,n ); XtAddCallback(wd->animate, XtNcallback, Animate, (XtPointer) wd); wd->do_animate = False; return(wd->print); } /* * dyanmically create a print menu by polling the translator to see * what printing devices are available */ void create_print_menu(print, wd) Widget print; WidgetData *wd; { Widget menu, entry; Arg args[10]; char *alias_list, **spooler_list, **ptr; extern char **SpoolerList(); alias_list = TalkTo(wd->id, "alias\n", SYNC); spooler_list = SpoolerList(alias_list); if (*spooler_list) { menu = XtCreatePopupShell("menu", simpleMenuWidgetClass, print, (ArgList) NULL,0); for (ptr = spooler_list; *ptr; ptr++) { entry = XtCreateManagedWidget(*ptr, smeBSBObjectClass, menu, (ArgList) NULL, 0); XtAddCallback(entry, XtNcallback, PrintSelect, (XtPointer) wd); } } else { Message(wd->id, "Can't find any spooled devices for printing"); XtSetArg(args[0], XtNsensitive, False); XtSetValues(print, args, 1); } } #ifdef DEAD static Visual *get_best_8bit_visual(depth, dpy) int *depth; Display *dpy; { XVisualInfo vinfo; int screen = DefaultScreen(dpy); /* * find best 8-bit depth visual */ if (XMatchVisualInfo(dpy, screen, 8, PseudoColor, &vinfo)) { *depth = vinfo.depth; return(vinfo.visual); } else if (XMatchVisualInfo(dpy, screen, 8, StaticColor, &vinfo)) { *depth = vinfo.depth; return(vinfo.visual); } else if (XMatchVisualInfo(dpy, screen, 8, GrayScale, &vinfo)) { *depth = vinfo.depth; return(vinfo.visual); } else if (XMatchVisualInfo(dpy, screen, 8, StaticGray, &vinfo)) { *depth = vinfo.depth; return(vinfo.visual); } /* * yuck, can't find anything. return the default */ *depth = DefaultDepth(dpy, screen); return (DefaultVisual(dpy, screen)); } #endif /* DEAD */ /* * CreateDisplayPopup * [exported] * * Dynamically create a popup 'display'. This popup is located with its * top left corner in the center of the calling widget. * * on entry * button : widget used to position the display * *metafile : name of the metafile to translate */ void CreateDisplayPopup(button, metafile) Widget button; char *metafile; { Widget paned; /* constraint widget */ Widget canvas; /* the drawing canvas */ Widget popup; /* top-level popup */ Widget print; /* "print" button widget */ Window win; /* drawing canvas window id */ WidgetData *wd; char *s; #ifdef DEAD Arg args[10]; Cardinal n; Visual *visual; int dsp_depth; Colormap cmap; #endif /* DEAD */ if (!(wd = (WidgetData *) malloc(sizeof(WidgetData)))) { (void) fprintf(stderr, "Malloc failed\n"); return; } wd->dpy = XtDisplay(button); /* init some fields */ wd->current_frame_num = -1; /* none initially */ #ifdef DEAD visual = get_best_8bit_visual(&dsp_depth, wd->dpy); if (visual == DefaultVisual(wd->dpy, DefaultScreen(wd->dpy))) { cmap = DefaultColormap(wd->dpy, DefaultScreen(wd->dpy)); dsp_depth = DefaultDepth(wd->dpy, DefaultScreen(wd->dpy)); } else { cmap = XCreateColormap( wd->dpy, RootWindow(wd->dpy, DefaultScreen(wd->dpy)), visual, AllocAll ); } n = 0; XtSetArg(args[n], XtNcolormap, cmap); n++; XtSetArg(args[n], XtNvisual, visual); n++; XtSetArg(args[n], XtNdepth, dsp_depth); n++; popup = XtCreatePopupShell(metafile, topLevelShellWidgetClass, button, args, n); #endif popup = XtCreatePopupShell(metafile, topLevelShellWidgetClass, button, (ArgList) NULL, 0); paned = XtCreateManagedWidget("paned", panedWidgetClass,popup, (ArgList) NULL,0); if (! 
App_Data.oldidt) { canvas = XtCreateManagedWidget("canvas", widgetClass,paned, (ArgList) NULL,0); wd->canvas = canvas; } /* * open a connection to a translator */ if ((wd->id = OpenDisplay())< 0) { (void) fprintf(stderr, "Translator aborted\n"); return; } wd->popup = popup; wd->app_context = XtWidgetToApplicationContext(popup); /* * The main display is made up of four sub-panels */ create_tip_top_panel(paned, wd); create_top_panel(paned, wd); create_middle_panel(paned, wd); print = create_bottom_panel(paned, wd); XtPopup(wd->popup, XtGrabNone); if (! App_Data.oldidt) { unsigned long mask = CWBackingStore; XSetWindowAttributes xswa; /* * enable button click events in the drawing canvas so the * rubber-banding code in ZoomCoords() can get events from * the drawing window, not the top-level window */ win = (Window) XtWindow(canvas); XSelectInput(XtDisplay(canvas), win, ButtonPressMask); /* * turn on backing store for drawing canvas. Wish we * could do this from an Xt resource. */ xswa.backing_store = WhenMapped; XChangeWindowAttributes(wd->dpy, win, mask, &xswa); } else { win = (Window) -1; } wd->win = win; if (App_Data.debug) { fprintf(stderr, "Canvas window id(%d)\n", win); } /* * now that our drawing window has been mapped we can spawn * the translator and request it to drawn in the idt canvas */ if (StartTranslator(wd->id, metafile, win)< 0) { (void) fprintf(stderr, "Translator aborted\n"); return; } /* * now that the translator is up and running we can poll it to * find out what printing devices are available so we can create * our print menu dynamically */ create_print_menu(print, wd); /* * find out how many frames are in the metafile now that the * translator is up */ s = TalkTo(wd->id, "stop\n", SYNC); wd->pcv.stop_segment = atoi(s); }
// @flow
import { Given, When, Then } from 'cucumber';
import { expect } from 'chai';
import { initialSettingsHelpers } from './helpers';
import type { Daedalus } from '../../../types';

declare var daedalus: Daedalus;

const { ensureLanguageIsSelected } = initialSettingsHelpers;

const INITIAL_SETTINGS_FORM = '.InitialSettings_component';

Given(/^I have selected English language$/, async function() {
  await ensureLanguageIsSelected(this.client, {
    language: 'en-US',
  });
});

Given(/^I dont have a language set$/, async function() {
  await this.client.execute(() => {
    daedalus.reset();
  });
});

When(/^I am on the language selection screen$/, function() {
  return this.client.waitForVisible('.InitialSettings_component');
});

When(/^I open language selection dropdown$/, function() {
  return this.waitAndClick(
    '.InitialSettings_component .SimpleInput_input'
  );
});

When(/^I select Japanese language$/, function() {
  return this.waitAndClick(
    '//*[@class="SimpleOptions_option"]//*[contains(text(), "Japanese")]'
  );
});

When(/^I submit the language selection form$/, function() {
  return this.waitAndClick('.ProfileSettingsForm_submitButton');
});

Then(/^I should not see the language selection screen anymore$/, function() {
  return this.client.waitForVisible(INITIAL_SETTINGS_FORM, null, true);
});

Then(/^I should have Japanese language set$/, async function() {
  const result = await this.client.executeAsync(done => {
    daedalus.stores.profile.getProfileLocaleRequest
      .execute()
      .then(done)
      .catch(error => done(error));
  });
  expect(result.value).to.equal('ja-JP');
});
When you think back 7, 8, 9 years ago, before the age of unicorns, startups were still a very exotic world. And I remember talking with some of my clients and telling them, "Why don't you go and work with startups?" and they were telling me, "Julien, we are a 25 million euro company and those guys work out of a garage and wear blue jeans. There is no way we can work with them." So there has been a shift of mindset. So the big question I want to ask you is: is it possible that working with social entrepreneurs will be just as obvious and necessary 5 years from now as it has become to work with startups today? We will see this shift of mindset in the coming years. This is the bet we are making. We believe the social entrepreneur is the new startup. We believe it is going to be the norm for big companies to engage with social entrepreneurs. And when you think about it, it makes a lot of sense, because all the advantages we saw of engaging with startups apply just as well when you engage with social entrepreneurs.
Peter Penashue's campaign had to settle up after taking 28 illegal contributions in his 2011 federal election campaign, including more than $18,000 from Provincial Airlines and $5,500 from construction company Pennecon, newly released records show. The records, posted on Elections Canada's website, show Penashue's campaign first made amends last November, almost four months before he told Conservative officials that he wanted to step down and fight for his seat again in a byelection. Penashue resigned his seat in the House of Commons Thursday, as well as his role as intergovernmental affairs minister, in the wake of controversy over taking corporate donations and going over his election campaign spending limit. Revised campaign spending records published Friday show: Penashue's campaign accepted 28 ineligible, or illegal, donations. The illegal monetary donations totalled $27,850, with an additional $18,710.54 in in-kind contributions from Provincial Airlines. The donations included money from Pennecon Ltd., a company whose contribution was revealed last fall by CBC News. Penashue's campaign wrote two big cheques to the federal government through the Receiver General of Canada to cover the ineligible contributions. Candidates can pay back ineligible donors or pay the ineligible amount to the Receiver General. $26,850 was paid on Nov. 28, 2012 to cover the monetary contributions (one ineligible contribution had been paid off shortly after the 2011 election, for a total of $27,850). The in-kind contribution of $18,710.54 was paid March 4, 2013, a week-and-a-half before Penashue resigned. Party gave campaign $44,350 The campaign had just under $4,000 in its account by the end of the summer after the 2011 election, with $15,000 owing to an Innu group that had loaned it $25,000. Innu Development Limited Partnership, which was run at the time by Penashue's brother-in-law, Paul Rich, made the loan without setting out an interest rate in the contract, although an interest-free loan isn't allowed under Canadian election law. The Conservative Party came to Penashue's rescue last November, transferring $10,000 and then $20,000 to the campaign on Nov. 23, 2012. The party transferred another $14,350 to the campaign on March 1, 2013. Penashue's resignation forces a byelection in Labrador, a large and sparsely populated riding. Prime Minister Stephen Harper must announce the date of the byelection within six months. Yvonne Jones, currently a member of the Newfoundland and Labrador house of assembly, said Friday morning she'll run for the federal Liberals. A CBC News investigation first revealed last fall that Penashue's campaign may have taken corporate donations. Had it paid for the flights around Labrador, the campaign would have gone about 21 per cent over the spending maximum allowed by Elections Canada. Now that he has compensated the federal government, the amount spent on the flights no longer counts toward his $84,468.09 cap. It is listed in a separate category for expenses that don't count toward a candidate's spending limit. The records show he was slightly over his cap, at $89,997.85. 'I haven't done anything wrong' In an interview with a Labrador radio station, Penashue said he was waiting for Elections Canada to finish its report into the campaign spending, which he said he expects soon. "What I saw, I wasn't pleased. So I thought that the best and appropriate thing to do was to resign," he said. "I'm not comfortable with those illegitimate contributions and ... 
I don't think Labradorians feel comfortable with it either so I think it's proper that I resign and have the people of Labrador decide as to who they would like to have them represented in Ottawa." "I haven't done anything wrong because I didn't have any knowledge to it," he added. Penashue told the radio station that he thinks he'll win the byelection.
Q: How can I switch this option menu on a fragment to an activity I want the option menu on my add-apartment fragment to open the add-apartment class via an intent instead. While running this code my app crashes. I might have missed something; could someone please help me? What should I do?

LandlordAddApartment.java

public class LandlordAddApartment extends Fragment {

    @Override
    public void onCreate(@Nullable Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setHasOptionsMenu(true);
    }

    @Override
    public void onCreateOptionsMenu(Menu menu, MenuInflater inflater) {
        inflater.inflate(R.menu.add_apartment, menu);
        super.onCreateOptionsMenu(menu, inflater);
    }

    @Override
    public boolean onOptionsItemSelected(MenuItem item) {
        int id = item.getItemId();
        if (id == R.id.landlord_add_apartment) {
            LanlordMapFragment mapFragment = new LanlordMapFragment();
            FragmentManager manager = getFragmentManager();
            manager.beginTransaction().replace(R.id.landlord_apartment, mapFragment, mapFragment.getTag()).commit();
            return true;
        }
        return super.onOptionsItemSelected(item);
    }

    @Override
    public void onViewCreated(View view, @Nullable Bundle savedInstanceState) {
        super.onViewCreated(view, savedInstanceState);
        getActivity().setTitle("Manage Apartment");
    }

    @Override
    public View onCreateView(LayoutInflater inflater, @Nullable ViewGroup container, @Nullable Bundle savedInstanceState) {
        View v = inflater.inflate(R.layout.landlord_add_apartment, container, false);
        return v;
    }
}

A: To switch from a Fragment to an Activity you can use:

startActivity(new Intent(getActivity(), YourActivity.class));

And to move from an Activity back to a Fragment:

YourFragment mapFragment = new YourFragment();
FragmentManager manager = getFragmentManager();
manager.beginTransaction().replace(R.id.landlord_apartment, mapFragment, mapFragment.getTag()).commit();
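If the goal is to launch an Activity from that same menu item rather than swap fragments, a minimal sketch of the handler might look like the following. AddApartmentActivity is a hypothetical class name, not something from the original post; it stands in for your own Activity, which must also be declared in AndroidManifest.xml, and android.content.Intent must be imported.

@Override
public boolean onOptionsItemSelected(MenuItem item) {
    if (item.getItemId() == R.id.landlord_add_apartment) {
        // Start an Activity instead of replacing the fragment.
        // AddApartmentActivity is a placeholder name for your own Activity.
        Intent intent = new Intent(getActivity(), AddApartmentActivity.class);
        startActivity(intent);
        return true;
    }
    return super.onOptionsItemSelected(item);
}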
William Taylor (New South Wales politician)

William Taylor (1862 – 15 February 1922) was an Australian politician. He was born at Bay of Biscay in Victoria to merchant James Taylor and Elizabeth Park. Educated at Ballarat, he left school at fourteen and entered the product merchandise trade. He married Elizabeth Laverick at Ballarat around 1884; they would have two daughters. He moved to Sydney in 1884, where he opened a branch of the firm he worked for, John Gray and Sons. He was a Rockdale alderman from 1890 to 1922, serving as mayor from 1892 to 1894 and from 1904 to 1909. In 1908 he was elected to the New South Wales Legislative Assembly as the Liberal member for St George, serving until his retirement in 1913. Taylor died in Sydney in 1922.
Q: How to build a Xamarin Visual Studio project without logging in to a Xamarin account I have Xamarin.Android version 8.0.2.1 installed on Visual Studio 2015. When I try to build a Xamarin application it asks me to log in to a Xamarin account. Must I log in to build and run a Xamarin application?

A: The problem has been solved; it was in the license file. I reinstalled Xamarin. The login window still appears, but the build operation continues without a problem.
Dry Brushing 101

When was the last time you rubbed a dry brush against your dry skin? If you haven’t done so recently, then you are missing out! Dry brushing (the name of this painful-sounding technique) is just that: dry brushing your dry skin in order to induce a plethora of amazing benefits.

Well, what are these so-called “amazing” benefits, you ask?

Improves circulation
Removes layers of dead skin
Tones muscles
Strengthens immune system
Cleanses lymphatic system
Stimulates cell production
Reduces appearance of cellulite
Relieves stress

So, now that you’re eager to get started, here’s how:

Starting at your feet, work the brush upwards in a circular motion, using gentle pressure. The key here is to slowly work your way toward your heart – hence starting at your feet!
Continue this gentle motion until each body part is done (feet, calves, thighs, buttocks, hands, arms, back, abdomen, chest). Avoid areas that may be more sensitive than others.
It is recommended that you dry brush before showering. This prevents any potential moisture from interrupting the process. Not to mention you may want to shower after this process in order to rinse away any of the dead skin cells. (If you decide to skip the shower altogether, no judgment here, my friend!)
Avoid dry brushing if you are suffering from any wounds, acne, eczema, or any other skin condition that may be further irritated by dry brushing.
If the process ever feels painful or uncomfortable, STOP! Your brush bristles may be too rough or your skin may be very sensitive.
Feel free to work your way up to completing your whole body. No rush!
Did I mention that natural bristle brushes tend to be more effective and less irritating than the hard, synthetic brushes you may find in your local pharmacy?
Dry brush as many times as you’d like, but at least 1x per week. If your skin is sensitive, feel free to do even less than this amount.

What benefits have you noticed from dry brushing? We want to hear all about it! Comment below!
Section: News & Summaries Militants concentrate a task group with armored vehicles near Triokhizbenka, Luhansk region. Head of the Information Resistance group Dmytro Tymchuk wrote on Facebook. “Near the settlement of Triokhizbenka, the enemy reinforced task group is concentrated, which includes 16 armored vehicles. The artillery group for support is deployed to the south... Over the previous day, six Ukrainian military were wounded in fighting in the ATO area. The ATO press center informs. “In battles, which continued during the previous day, six Ukrainian soldiers were wounded,” a statement reads. According to the ATO headquarters, currently, they are at medical establishments, where qualified medical aid was given... Ukrainian President Petro Poroshenko will make a working visit to Slovakia this weekend, the presidential press service has reported. “On November 15-16, Ukrainian President Petro Poroshenko will make a working visit to the Slovak Republic,” reads the report. Striking forces of terrorists move to advanced positions near Mariupol. Head of the Information Resistance group Dmytro Tymchuk wrote on Facebook. “Near Mariupol, advance movement of the enemy attack groups is observed, totaling up to two squadrons (advanced guard),” Tymchuk wrote. Militants are actively engaged in strengthening their front line in Donetsk and near Makiyivka. Coordinator of the Information Resistance group Dmytro Tymchuk wrote on his Facebook page. “Russian-terrorist forces strengthened their frontline in Petrivsky, Kyivsky and Kirovsky districts of Donetsk, as well as near Spartak. Actually, all “internal”... Ukrainian military have destroyed two firing positions of terrorists near Donetsk and Krasnohorivka. Information Resistance group leader Dmytro Tymchuk reported this on his Facebook page. “As a result of the actions of Ukrainian troops one firing position of the enemy was destroyed in the area of Spartak and one between the settlements of... Terrorists have violated the terms of the ceasefire in Donbas 50 times over the past 24 hours and again tried to seize the Donetsk airport, the press center for the anti-terrorist operation has reported. “Trying to take control of the Donetsk airport militants focused their actions on attempts to destroy the communications between the... Maidan Community Sector, Lviv: Dear friends! November 13 – On the evening of November 12th Russian military intelligence warship was observed in Latvia’s exclusive economic zone. November 13 – A record number of people wearing military uniform report has crossed Ukrainian-Russian border last week – weekly report of OSCE’s monitoring... Russian-terrorist forces continue to violate the agreement on a temporary ceasefire. Over the past day, ATO position forces suffered more than 40 attacks by Russian occupation forces and illegal armed groups. In particular, enemy troop activity is most noted in the Luhansk, Donetsk and Debaltseve directions – 13, 12 and 11 fire strikes each,... Ukrainian law enforcement agencies have drafted a joint action plan under the conditions of Russian aggression. An advisor to the Ukrainian interior minister, Zorian Shkiriak, said this at a press briefing on Thursday, a Ukrinform correspondent reported. “The Interior Ministry, the Defense Ministry and SBU worked out a systemic model of...
Q: Conditionally merge dictionaries using ternary operators I have a function that builds a header according to the type of data it receives. I know I could do it with ifs; this is just a curiosity about how ternary operators can be embedded in some sort of dictionary comprehension. I tried implementing the ternary just inside the dictionary so that it always outputs a dictionary.

def _create_header(self, data):
    base_header = {
        'type': data["type"],  # arg provided
        'dest_ID': 1,  # arg provided
        'src_ ID': 2,  # arg provided
    }
    dest_header = {
        'dst_addr': 0,  # arg provided
    }
    data_header = {
        'priority': 0,  # arg provided
        'length': 0,  # arg provided
        'data_format': 0,  # arg provided
        'data_type': 0,  # arg provided
    }
    index_header = {
        'x_of_y': 0,  # len dependant
        'y': 0,  # len dependant
    }
    protocol_header = {
        'check_type': 0,  # sys dependant
        'rev': 0,  # sys dependant
        'reserved': 0,  # rev dependant
    }
    header = {}
    return {**base_header, **dest_header if data["type"] in range(4,9) else {}, **data_header, **index_header, **protocol_header}

I always get Invalid Syntax, so I guess it might not be possible. Just wanted to know if anyone has tried it.

A: The unary operator ** binds more strongly than you think. Parenthesize the ternary expression and it should work:

return {
    **base_header,
    **(dest_header if data["type"] in range(4, 9) else {}),
    **data_header,
    **index_header,
    **protocol_header,
}
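As a quick sanity check of the parenthesized version, here is a small self-contained sketch; the header contents are trimmed down and the sample type values are made up purely for illustration.

def create_header(data):
    # Same idea as the question, with the conditional expression parenthesized.
    base_header = {'type': data["type"], 'dest_ID': 1, 'src_ID': 2}
    dest_header = {'dst_addr': 0}
    data_header = {'priority': 0, 'length': 0, 'data_format': 0, 'data_type': 0}
    index_header = {'x_of_y': 0, 'y': 0}
    protocol_header = {'check_type': 0, 'rev': 0, 'reserved': 0}
    return {
        **base_header,
        # The destination block is only merged in for types 4 through 8.
        **(dest_header if data["type"] in range(4, 9) else {}),
        **data_header,
        **index_header,
        **protocol_header,
    }

print(create_header({"type": 5}))  # includes 'dst_addr'
print(create_header({"type": 1}))  # no 'dst_addr'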
Q: How can I confirm if an async EF6 await db.SaveChangesAsync() worked as expected? My code looks like this:

public async Task<IHttpActionResult> Delete(int id)
{
    var userId = Int32.Parse(User.Identity.GetUserId());
    UserTest userTest = await db.UserTests.FindAsync(id);
    if (userTest == null)
    {
        return NotFound();
    }
    if (userTest.UserId != userId)
    {
        return Unauthorized();
    }
    db.UserTests.Remove(userTest);
    await db.SaveChangesAsync();
    return Ok();
}

I think everything up to db.SaveChangesAsync is okay, but how can I confirm that db.SaveChangesAsync worked before doing a return Ok()? Ideally I think I should be checking for exceptions and such, but I am not sure how I could fit that into this code block.

A: From MSDN:

public virtual Task<int> SaveChangesAsync()

Return Value
Type: System.Threading.Tasks.Task
A task that represents the asynchronous save operation. The task result contains the number of objects written to the underlying database.

Check if the result is greater than 0:

if (await db.SaveChangesAsync() > 0)
{
    .....
}

More info here

Another option is to wrap this with a try ... catch block:

try
{
    await db.SaveChangesAsync();
    return Ok();
}
catch (Exception ex)
{
    return NotFound(ex.Message);
}

A: You can use the following below :)

try
{
    int isSaved = await db.SaveChangesAsync();
    if (isSaved > 0)
    {
        // saved
    }
    else
    {
        // not saved
    }
}
catch (Exception e)
{
    // handle error here
}
252 P.3d 539 (2011) In the Matter of the ESTATE OF Matthew D. KEENAN, a protected person, Appellant, v. COLORADO STATE BANK AND TRUST, Appellee. No. 10CA0112. Colorado Court of Appeals, Div. V. February 17, 2011. *540 Morgan Legal Offices, P.C., Chester H. Morgan, Colorado Springs, Colorado, for Appellant. Wade Ash Woods Hill & Farley, P.C., Herbert E. Tucker, Spencer J. Crona, Denver, Colorado, for Appellee. Opinion by Judge WEBB. When a protected person's cognition improves, a conservatorship may no longer be needed. And even if it is, disagreements with the conservator may warrant substitution of a new conservator for the protected person's best interests. Should a protected person who seeks to terminate a conservatorship, and failing that to replace the conservator, face not only opposition by the conservator but also the risk that the conservator's expenses in opposing him will be paid from the protected person's assets? At common law, such opposition to the protected person does not necessarily breach the conservator's fiduciary duty. And under section 15-14-417(3), C.R.S.2010, if the conservator acts reasonably and in good faith, the conservator should be compensated from assets subject to the conservatorship for expenses reasonably incurred in doing so. Here, we conclude that the record supports the trial court's finding that as conservator for Matthew D. Keenan, Colorado State Bank and Trust (CSBT) acted reasonably and in good faith. However, we further conclude that additional findings are required concerning whether: the $1,945 CSBT paid itself for extraordinary services came from Keenan's income trust; the attorney fees CSBT incurred defending its accounting were reasonable; a full award of its fees and costs would be equitable; and ordering them to be paid from the conservatorship and trust was just. Therefore, we vacate the attorney fees and costs award, remand for such findings, and otherwise affirm. I. Background Keenan is the beneficiary of a disability trust and an income trust funded by a multi-million dollar medical negligence settlement arising from a catastrophic brain injury. Initially, his mother was appointed guardian, conservator, and trustee of the income trust, while CSBT was appointed trustee of the disability trust. In 2005, Keenan moved to terminate the guardianship and conservatorship based on significant improvement in his cognition. By stipulation, CSBT was appointed as trustee of the income trust and conservator, and Anne Grasee was appointed as his limited guardian. In 2007, the relationship between Keenan and Grasee soured. Among other disagreements between them, Keenan asserted that he was seeking to be "restored to capacity," while Grasee perceived his behavior as impatient, self-defeating, and inconsistent with her plan to develop his independence gradually. She sought legal advice and started dealing with Keenan through communications from her attorney to his attorney, but did not resign. Grasee proposed to have Keenan evaluated by Stuart Kutz, Ph.D., a neuropsychologist. *541 Keenan discharged his attorney, who had agreed to the evaluation, requested pro se that the court "terminate any and all forms of guardianship," and secured new representation. His counsel supplemented the pro se request with a petition to terminate the conservatorship and opposed having to pay for the Kutz evaluation. 
Keenan submitted an evaluation from another neuropsychologist who found him unimpaired and recommended terminating both the guardianship and the conservatorship. CSBT joined in Grasee's motion to appoint Kutz to evaluate Keenan, which the court granted with payment to be made from the disability trust or the conservatorship. In his October 9, 2007, report of the examination, Kutz concluded that Keenan no longer met the statutory definition of incapacity for guardianship. Grasee resigned shortly thereafter. But using the standard of incapacity applicable to conservatorships, he also concluded that Keenan: [I]s unable to manage property and business affairs because he is unable to effectively receive or evaluate information, or both, or make decisions. . . . A professional fiduciary, such as a conservator and/or trustee, is indicated. . . . The report did not address changing fiduciaries. In later proceedings, Kutz testified to having told CSBT that he did not believe that excessively changing fiduciaries was in Keenan's best interests. On October 15, 2007, Keenan proceeded to hearing on his petitions before Judge Sandstead. Initially, the court granted Grasee a decree of final discharge. The court then heard testimony, including from Keenan, who stated on cross-examination, "I don't have a problem with a conservator, . . . [i]t's having this bank as my conservator." In colloquy at the end of the hearing, Keenan's counsel acknowledged that given Grasee's resignation and Keenan's statement, the focus should be on replacing CSBT as conservator. By written order, the court maintained the conservatorship without addressing Keenan's capacity; recognized that Keenan could seek replacement of CSBT; and directed CSBT to assist Keenan in obtaining a new manual wheelchair and a portable electric wheelchair charger. Keenan then moved under section 15-14-112(2), C.R.S.2010, to replace CSBT as conservator with Members Trust Company, based on opinions from his treatment providers that ongoing conflict with CSBT was detrimental to his mental health. CSBT opposed the motion and sought Keenan's medical records. On December 13, 2007, without holding a hearing, Judge Sandstead entered an order removing CSBT under the best interests standard; substituting Members Trust; and directing CSBT to file its final accounting. Keenan proposed that CSBT absorb its attorney fees incurred in opposing termination of the conservatorship and substitution of Members Trust. Instead, CSBT filed its accounting, which included those fees. Keenan objected, primarily challenging CSBT's joinder in the motion for the Kutz examination; opposition to termination of the conservatorship; payment of bills of Kutz, of Grasee, of her counsel, and of its own counsel; payment of its "extraordinary" fees; failure to obtain a charger and a new wheelchair for him; opposition to substitution of Members Trust; and overall fiduciary administration. Discovery began. On November 26, 2008, Judge Bailin took up CSBT's summary judgment motion and under C.R.C.P. 56(h) ruled that: CSBT had the right to oppose Mr. Keenan's motion to terminate, including requesting an evaluation, provided that it was acting reasonably and . . . in good faith believed that termination was not in the best interests of Mr. Keenan because Mr. Keenan continued to be unable to manage his own assets. . . . 
The court did not address the propriety of CSBT's opposition to substituting Members Trust and ruled that Keenan's other objections raised factual issues concerning CSBT's good faith. Keenan filed but then withdrew a request for C.R.C.P. 54(b) certification of this order as a final judgment. Following several days of hearings that included conflicting expert testimony on *542 CSBT's conduct, Judge Sandstead entered a lengthy written order. Applying the standard adopted by Judge Bailin, he found that CSBT had acted reasonably and in good faith. He rejected all of Keenan's challenges except the request for a portable electric charger and a new wheelchair. Concerning those two items, he found no breach of fiduciary duty and that "CSBT acted reasonably under the circumstances"; observed "those requests might have been handled better, an understatement"; and, "because it seems fair and just," ordered CSBT "to be surcharged in the amount of $5000.00." Otherwise, Judge Sandstead approved as "reasonable and appropriate" all disputed expenditures incurred and payments made by CSBT during 2007, including fees of Grasee and her counsel; joinder in the motion to appoint Kutz and payment of his fees; participation in the October 15, 2007, hearing; and opposition to substituting Members Trust. Judge Sandstead retired. CSBT petitioned for attorney fees of $198,126.80 and costs of $19,339.24 incurred defending its accounting. Keenan filed an opposition but did not request a hearing. Without holding a hearing, Judge Mulvahill, who had just been appointed, entered orders in the form tendered by CSBT awarding it all fees and costs requested, to be paid equally from the conservatorship and the disability trust. II. Reasonably and In Good Faith Test Keenan first contends Judge Bailin erred in articulating the legal standard as: "CSBT had the right to oppose Mr. Keenan's motion to terminate . . . provided that it was acting reasonably and . . . in good faith. . . ." On de novo review, see Sidman v. Sidman, 240 P.3d 360, 362 (Colo.App.2009), we disagree. A. Legal Standard for Opposing a Protected Person's Motion to Terminate the Conservatorship 1. Jurisdiction Initially, we reject CSBT's assertion that Keenan "forfeited his right to appeal" this issue by withdrawing his motion to certify as a final judgment Judge Bailin's legal determination under C.R.C.P. 56(h). CSBT cites no authority supporting this assertion, nor have we found any in Colorado. To the contrary, where an interlocutory appeal may be taken, failure to do so does not foreclose review of the ruling after entry of final judgment. See, e.g., Mountain Plains Constructors, Inc. v. Torrez, 785 P.2d 928, 931 (Colo. 1990) (order denying motion to compel arbitration); In re Nw. Mut. Life Ins. Co., 703 P.2d 1314, 1317 (Colo.App. 1985) (order appointing receiver). We decline to adopt a different approach where the appeal did not even become permissive for lack of a C.R.C.P. 54(b) certification. 2. Merits In articulating this legal standard, Judge Bailin relied primarily on sections 15-14-431(4) and 15-14-425, C.R.S.2010. Section 15-14-431(4) states in relevant part: "[B]efore terminating a conservatorship, the court shall follow the same procedures . . . that apply to a petition for conservatorship." She reasoned that, because section 15-14-408, C.R.S.2010, requires a hearing in a conservatorship petition, section 15-14-431(4) "envisions a hearing" when a protected person moves to terminate the conservatorship. 
Judge Bailin next noted, "Colorado law does envision that a conservator may, at least in some circumstances, oppose a protected person's motion to terminate," citing section 15-144-25(2)(x), C.R.S.2010. This section states, in relevant part: A conservator, acting reasonably and in an effort to accomplish the purpose of the appointment . . . may . . . [p]rosecute or defend actions, claims, or proceedings in any jurisdiction for the protection of assets of the estate and of the conservator in the performance of fiduciary duties. a. Common Law Keenan first asserts that the "reasonably and in good faith" standard is contrary to common law conflict of interest principles for fiduciaries. We begin with this assertion because no statute specifically permits a conservator to oppose a protected person's motion to terminate the conservatorship, but *543 conclude that the common law does not support Keenan. Keenan cites 39 Am. Jur. 2d Guardian and Ward § 162, which states that "[a]t common law, an action cannot be maintained between a guardian and a ward while that relation exists. . . ." He also relies on Rule 6 of the National Guardianship Association's "Model Code of Ethics for Guardians" (Model Code) for the proposition that "[n]ot only is it a breach of fiduciary duty to oppose a plea for termination of the guardianship [or] conservatorship, the fiduciary is bound to assist the ward [or] protected person in doing so." Neither authority is persuasive. Although 39 Am. Jur. 2d Guardian and Ward § 162 does not define "an action," the cases cited only prohibit a guardian from suing the ward while retaining control over the ward's estate. See, e.g., Briggs v. Briggs, 162 Tex. 177, 346 S.W.2d 106, 109 (1961) (quoting with approval Kidd v. Prince, 215 S.W. 844, 845 (Tex.Com.App.1919) ("incapacity [of the guardian] to sue [his ward] arises from the nature of the relationship of the parties")). The common law impliedly allows a guardian or conservator to oppose the ward or protected person's motion to terminate the relationship by holding that a guardian or conservator could be paid out of the ward's or protected person's funds for, reasonably and in good faith, opposing the motion to terminate. See In re Guardianship of Cookingham, 45 Cal.2d 367, 289 P.2d 16, 19 (1955); Conservatorship of Lefkowitz, 50 Cal. App.4th 1310, 58 Cal.Rptr.2d 299, 302 (1996); Woodruff v. Trust Co. of Georgia, 233 Ga. 135, 210 S.E.2d 321, 325 (1974); Palmer v. Palmer, 38 N.H. 418, 420 (N.H.1859); In re Larner, 39 Misc. 377, 380-81, 79 N.Y.S. 836, 837-38 (N.Y.Sup.Ct.1902); American Nat'l Bank v. Bradford, 28 Tenn.App. 239, 188 S.W.2d 971, 980 (1945), superseded in part on other grounds by Tenn. R. Evid. 201, as stated in Counts v. Bryan, 182 S.W.3d 288, 291 (Tenn.Ct.App.2005). In this regard, we discern no principled difference between a guardian and a conservator. See, e.g., Morgan County Dep't of Human Services ex rel. Yeager, 93 P.3d 589, 592 (Colo.App.2004) (noting that C.R.C.P. 17(c) refers to "a general guardian, conservator, or other like fiduciary"); § 15-10-201(19), C.R.S.2010 ("`[f]iduciary' includes a personal representative, guardian, conservator, and trustee"). The common law also recognizes that opposing a motion to terminate does not necessarily create a conflict of interest. While "a guardian does owe his ward the duty of undivided loyalty," Bradford, 188 S.W.2d at 979-80, "[not] every effort to release an incompetent person from restraint is of necessity friendly, and every effort to continue the restraint . . . 
unfriendly," Larner, 39 Misc. at 379, 79 N.Y.S. at 837-38. Thus, while in some cases it would be an act of intolerable oppression for the guardian to oppose . . . an application [to restore capacity], as where it is clear the ward has recovered and is restored in mind . . . [,] in other cases the guardian's duty of loyalty would require him to oppose such an application, as where it is clear the ward is not recovered. Bradford, 188 S.W.2d at 979-80. Further, to discourage or prohibit a guardian from opposing a motion to restore capacity in every instance could "allow the petition for restoration to be considered without the presentation of all of the facts." Cookingham, 289 P.2d at 19. Such lack of evidence would prejudice protected persons where their interests "require that the guardianship should continue unrevoked." See Palmer, 38 N.H. at 420. The Model Code was first proposed in Michael D. Casasanto et al., A Model Code of Ethics for Guardians, 11 Whittier L.Rev. 543 (1989). It has not been endorsed by the American Law Institute or any similar body as reflecting the common law. Keenan cites no appellate court decision adopting it on this basis, nor have we found any in Colorado. Rule 6, which is followed by three subparts, reads: "The guardian has an affirmative obligation to seek termination or limitation of the guardianship whenever indicated." Id. at 566 (emphasis added). The Model Code does not explain "whenever indicated." *544 Although subpart 3 notes that "[t]he guardian shall assist the ward in terminating or limiting the guardianship . . .," the comments add that "where the guardian does not agree with the ward, [he should] arrang[e] for representation of the ward by independent legal counsel." Id. at 566-67. Therefore, the common law does not persuade us to disavow Judge Bailin's "reasonably and in good faith" test. b. Statutory Provisions Keenan also asserts that Judge Bailin erred in her statutory analysis. Again, we disagree. We interpret statutes de novo, looking first to the plain language. Granite State Ins. Co. v. Ken Caryl Ranch Master Ass'n, 183 P.3d 563, 567 (Colo.2008). If the meaning is clear, we apply the statute as written. Wells Fargo Bank v. Kopfman, 226 P.3d 1068, 1072 (Colo.2010). First, Keenan argues that a protective proceeding is not "an action, claim or proceeding" that a conservator can prosecute or defend under section 15-14-425(2)(x) which, he avers, "unmistakably applies to ordinary litigation." But Keenan cites no authority so limiting this provision, nor have we found any. Further, because the statute encompasses "actions, claims, or proceedings," the term "proceedings" must be broader than "actions." Cf. Scott v. Scott, 136 P.3d 892, 896 (Colo.2006) ("the unsupervised administration of an estate may involve multiple proceedings"). And for reasons set forth in cases such as Bradford and Cookingham, allowing a conservator to oppose a motion to terminate would be "for the protection of assets of the estate," where the conservator had a good faith belief that the protected person remained unable to manage those assets. Second, Keenan contrasts section 15-14-425(2)(x) with section 15-14-431(4), which he correctly asserts "says nothing about who, if anybody, could or should prosecute any opposition to termination." Therefore, he argues, "statutory construction should not be made to abrogate common law absent a clear declaration that such was intended." 
But because the common law allows a conservator or guardian to oppose the protected person's motion to terminate, reading the statute to prohibit that action, as Keenan advocates, would contravene the very principle he cites. Third, Keenan argues that even if opposing the protected person's motion to terminate is not an impermissible conflict of interests, section 15-14-417(1) governs such opposition. CSBT counters that Judge Bailin's order should be upheld under section 15-14-417(3). Although Judge Bailin did not cite section 15-14-417(3) in her order, we agree that it, like the common law, impliedly supports the "reasonably and in good faith" standard she articulated. Section 15-14-417, "Compensation, fees, costs, and expenses of administration—expenses," also does not address the propriety of a conservator opposing the protected person's motion to terminate. However, by specifying when payment of the conservator from the protected person's estate is appropriate, it implicitly authorizes the actions that trigger payment. For that reason, the standards that govern payments to the conservator also apply to the propriety of the conservator's underlying conduct. The section contains six subsections, of which four are relevant here.[1] Subsection (1), "Compensation," states, in relevant part: If not otherwise compensated for services rendered, any visitor, guardian, conservator, special conservator, lawyer for the respondent, lawyer whose services resulted in a protective order or in an order beneficial to an incapacitated person or to a protected person's estate, any physician, guardian ad litem, or any other person appointed by the court is entitled to reasonable compensation from the estate even if no fiduciary is appointed. Subsection (2), "Fees," enumerates factors to be considered when determining whether *545 fees incurred are reasonable, consistent with Colorado Rules of Professional Conduct 1.5, and is discussed more fully in Part IV, infra. Subsection (3), "Expenses in estate litigation," states, in relevant part: [I]f any guardian, conservator, special conservator, or court-appointed fiduciary defends or prosecutes any proceeding in good faith, whether successful or not, he or she is entitled to receive from the estate his or her necessary time, expenses, and disbursements including reasonable attorney fees incurred. Any such person or fiduciary who is unsuccessful in defending the propriety of his or her actions in a breach of fiduciary duty action shall not be entitled to recover expenses under this section to the extent of any matters on which such breaches are found. Subsection (4) recognizes expenses incurred in defense of fiduciary fees, and is also discussed in Part IV, infra. Keenan's assertion that subsection (1) controls is incorrect. He states that "[a]lthough subsection (1) does not specify (or limit) its application to protective proceedings, by clear implication that's what it means," but neither offers argument nor cites authority supporting this statement. See, e.g., Bloom v. NCAA, 93 P.3d 621, 623 (Colo.App.2004) (an assertion unsupported by specific argument or authority need not be addressed). He also argues that because subsection (1) is more specific than subsection (3), the former takes precedence "to the extent inconsistent" with it. See Crandall v. City & Cnty. of Denver, 238 P.3d 659, 662 n. 2 (Colo.2010) (a specific statutory provision takes precedence over a more general one). 
But the plain language of subsection (3) deals expressly with litigation and its related expenses, including attorney fees. In contrast, subsection (1) applies more broadly to "services rendered," and does not specify or limit the types of services eligible for compensation. Therefore, we conclude that because subsection (3) is more specific than subsection (1), it takes precedence where, as here, "estate litigation" is involved.[2] Subsection (3) allows the conservator to collect expenses, including reasonable attorney fees, from the protected person's estate in good-faith defense of a proceeding. This comports with the common law and supports Judge Bailin's articulation of the "reasonably and in good faith" standard.[3] Accordingly, we conclude that a conservator can reasonably and in good faith oppose a protected person's motion to terminate the conservatorship. B. Legal Standard for Opposing a Protected Person's Motion to Substitute Conservators Judge Bailin's C.R.C.P. 56(h) order did not address a conservator's opposition to a protected person's motion to substitute a new conservator. Nevertheless, Judge Sandstead applied the "reasonably and in good faith" standard to CSBT's opposition to Keenan's motion to replace it with Members Trust. Neither Keenan nor CSBT makes a separate argument as to the legal standard for opposing a motion to change conservators. While a conservator's opposition to such a motion raises the spectre of self-dealing (see Sub-Part C, infra), for two reasons we decline to adopt a different standard in this context. First, the common law does not differentiate between opposing a motion to terminate the conservatorship and opposing a motion to change conservators. See Conservatorship of Lefkowitz, 58 Cal.Rptr.2d at *546 301-02 (analyzing the later for reasonableness and good faith); cf. In re Pelgram's Estate, 146 Misc. 750, 262 N.Y.S. 848, 854-55 (N.Y.Sur.Ct.1933) (trust estate responsible for fees of trustee's attorney for opposing removal of co-trustee in good faith—up to point where further opposition became unreasonable). Second, a conservator's opposition to the protected person's motion to change conservators also constitutes "estate litigation" under section 15-14-417(3), for the reasons discussed in the preceding subsection.[4] C. Policy Considerations We acknowledge Keenan's argument that personal autonomy will suffer if a protected person seeking to be restored to capacity must not only face opposition by the person's conservator but also risk paying for the conservator's opposition. And we agree that most conservators will have an inherent conflict of interest because maintaining the conservatorship and the conservator's appointment ensure that the conservator continues to be paid for managing the estate.[5] But even where a statute is "troubling," a court "must resist the temptation to change the statutory language." Common Sense Alliance v. Davidson, 995 P.2d 748, 755 (Colo.2000). "It is not up to the court to make policy or to weigh policy." Town of Telluride v. Lot Thirty-Four Venture, L.L.C., 3 P.3d 30, 38 (Colo.2000). Such action must be left to the General Assembly. See, e.g., People v. Clendenin, 232 P.3d 210, 218 (Colo.App.2009) (Loeb, J., specially concurring) (noting "anomaly" in medical marijuana amendment and suggesting "that some legislative action will be required if the. . . purposes of the amendment are to be fully effectuated"). III. 
CSBT Acted Reasonably and in Good Faith Keenan next contends Judge Sandstead erred by finding CSBT "acted reasonably and in good faith to protect the conservatorship and trust estates." We disagree. "Reasonable" means "[f]air, proper, or moderate under the circumstances." Black's Law Dictionary 1379 (2009). "`Good faith' has been defined in terms of both a subjective standard—i.e., `an intangible and abstract quality' that encompasses `an honest belief'—and an objective standard—i.e., `freedom from knowledge of circumstances which ought to put the holder upon inquiry.'" Lybarger v. People, 807 P.2d 570, 577 (Colo. 1991) (quoting Black's Law Dictionary 623 (1979)). Acting reasonably is a fact question, Heller v. First Nat'l Bank of Denver, N.A., 657 P.2d 992, 999 (Colo.App. 1982), as is acting in good faith, Woodruff, 210 S.E.2d at 325. When the record shows a party's reasonableness, separate inquiry into good faith need not be undertaken absent evidence of bad faith. See Lybarger, 807 P.2d at 579-81; cf. Knight v. Lawrence, 19 Colo. 425, 432, 36 P. 242, 245 (1894) (observing that "[t]he law presumes that all men act in good faith until there is some evidence to the contrary"). We review a court's factual findings for clear error, Schuler v. Oldervik, 143 P.3d 1197, 1202 (Colo.App.2006), and reverse a finding only if it has no support in the record, M.D.C./Wood, Inc. v. Mortimer, 866 P.2d 1380, 1383 (Colo.1994). When the record includes conflicting expert testimony, "[w]e review the trial court's resolution of that conflicting evidence under a highly deferential standard." Bainbridge, Inc. v. Bd. of Cnty. Comm'rs, 53 P.3d 646, 649 (Colo.App. 2001). Here, Judge Sandstead heard conflicting testimony from experts for Keenan and *547 CSBT regarding the propriety of its actions in opposing Keenan's motions to terminate the conservatorship and to replace it as conservator. Keenan presented testimony from two experts that CSBT had acted unreasonably. However, the following testimony of CSBT's expert, Martha Ridgway, supports Judge Sandstead's finding that CSBT acted reasonably: • CSBT had a duty to "act[] in a matter that was in [Keenan's] best interest and in the best interest of [his] funds. . . ." • Keenan's motion to terminate the conservatorship included "evaluations that he had gotten himself." • It was appropriate for CSBT to "want[] to have an independent evaluation" from Kutz, who "didn't have an agenda." • CSBT may have breached a fiduciary duty "if [it] hadn't made such a request because the prior evaluations . . . were sought out by [Keenan,]" who "had an agenda." • CSBT had a statutory right to petition the court to have Keenan evaluated. • After Kutz's report, CSBT appropriately continued to oppose termination because "Kutz put in his report that Mr. Keenan. . . admitted that he needed help with his money." • CSBT may have been "extraordinarily exposed" to liability if it had not opposed termination because "not only did [it] have actual knowledge that Dr. Kutz . . . had recommended . . . continuation," but also "Keenan had admitted in prior reports that he felt he needed a fiduciary." • "It would have been catastrophic had the money been turned over [to Keenan]." • "Dr. Kutz advised several of the professionals in this case that he was very concerned with . . . a churning pattern" of changing conservators that he felt "was not in Mr. Keenan's best interests." • CSBT knew Kutz "was very concerned about this revolving door issue. And [CSBT] was quite concerned . . . 
to make sure that changing fiduciaries was in [Keenan's] best interests. So I think they had no choice but to do what they did." Further, when asked, "Is there any indication that [CSBT] acted in bad faith?" Ridgway responded, "No, there's not." Nevertheless, in addition to his experts' testimony, Keenan asserts that CSBT did not act in good faith in opposing termination of the conservatorship because: Kutz's report was empirically weak; the report Keenan's psychologist submitted was more credible; and, in his evaluation of Keenan, "Kutz [did] not say that a conservatorship, and only a conservatorship, [was] required." The first two assertions point only to conflicting evidence that is subject to our "highly deferential" review. Bainbridge, Inc., 53 P.3d at 649. As to the third assertion, Keenan fails to address Kutz's testimony that "a limited conservatorship was indicated" and that, during the evaluation, Keenan "indicated to [Kutz] that he definitely felt that he needed a conservator." Keenan further challenges CSBT's good faith by quoting part of a pre-mediation email exchange between CSBT's lawyer and Grasee's lawyer.[6] However, the full exchange, which occurred before Keenan moved to replace CSBT as conservator, shows that CSBT's desire to stay on was spurred, as its lawyer explained, by concern that Keenan's "statements . . . contradict his motion to terminate . . . and may provide proof that he recognizes that the basis for the conservatorship still exists. . . ." Accordingly, despite some significant evidence to the contrary, we conclude that Judge Sandstead did not clearly err in determining that CSBT acted reasonably and in good faith in opposing Keenan's motions. IV. Accounting Issues Keenan next contends that even if CSBT acted reasonably and in good faith overall, Judge Sandstead erred in accepting CSBT's accounting because certain aspects of it were incomplete and unsubstantiated. We disagree, *548 except as to the $1,945 CSBT paid itself for "extraordinary" services, on which we remand for further findings. A fiduciary has a duty to provide "clear and accurate accountings. . . ." See Heller, 657 P.2d at 997. Any fees paid to a guardian out of a protected person's estate must be "reasonable." See § 15-14-417(1). "The determination of reasonableness is a question of fact for the trial court which will not be disturbed on review unless it is patently erroneous and unsupported by the evidence." Heller, 657 P.2d at 999. A. Payments to Grasee Keenan first argues that CSBT failed to provide a "clear and complete accounting" because it did not audit Grasee's invoices of payments to her lawyer.[7] We disagree. The record supports Judge Sandstead's findings that CSBT's payments to Grasee were "reasonable, appropriate and based on billing statements submitted to CSBT" and that "CSBT's reliance on the review of Ms. Grasee, an experienced court-appointed professional guardian, of her own attorney[']s billing statements was appropriate." Keenan's expert, David Griffith, agreed that the invoices Grasee submitted to CSBT for payments to her lawyer appeared to be "itemized statements." Ridgway testified that the fees of Grasee's lawyer were reasonable and that, in turn, the fees CSBT paid to Grasee were reasonable and necessary. She also testified that it was "perfectly acceptable" for CSBT to rely on Grasee's review of her lawyer's bills. B. Claim of Being "Short-Changed" Keenan next argues that he was "[s]hort-change[d] on his statutory entitlement" in the amount of roughly $4,000. 
But he does not claim that the money to which he may have been entitled was taken from his income trust, nor does he allege any other damages from nonpayment. Moreover, even if CSBT did not pay him the full amount due, the trust assets are now controlled by Members Trust, whom Keenan selected, and can be paid to him by Members Trust, if appropriate. C. CSBT's Payments After Removal Keenan next argues that CSBT improperly paid fees to itself and its lawyers after having been removed as conservator and trustee. Initially, we note that Keenan does not indicate where he raised this issue below, as required by C.A.R. 28(k). However, because our review of the record discloses that his lawyer raised this issue in closing argument, we will address it here. Keenan does not dispute that these payments were for expenses incurred before CSBT was replaced by Members Trust. He does not cite any authority, nor have we found any in Colorado, holding that a conservator breaches a duty by paying itself for expenses incurred while acting as conservator, after having been removed from that capacity. In any event, we need not resolve this legal question because Judge Sandstead found that CSBT was entitled to be paid for these expenses. Thus, we conclude that Keenan was not harmed because CSBT paid itself rather than releasing Keenan's funds to, and then billing, Members Trust. D. The $1,945 for "Extraordinary" Services Keenan further asserts, and CSBT does not deny, that CSBT paid itself $1,945 for "extraordinary" services in violation of 10 Code Colo. Regs. 2505-10:8.100.7.E(6) and article IV of Keenan's income trust. But whether the $1,945 was paid out of the income trust was not addressed by Judge Sandstead and is unclear from the record. Hence, we remand for further findings. Payments out of an income trust for "extraordinary" services are not authorized by section 15-14-412.7, C.R.S.2010, which *549 specifies income trust limitations. Colorado regulations governing income trusts permit "[a]n amount not to exceed $20.00 [to] be retained for trust expenses. . . ." 10 Code Colo. Regs 2505-10:8.100.7.E(6)(a)(i)(b) & (f). The regulations also direct that "[n]o other deductions or expenses may be paid from the trust." 10 Code Colo. Regs. 2505-10:8.100.7.E(6)(a)(i)(e). And article IV of the income trust permits distribution of trust income only to the beneficiary's nursing facility, with any money remaining to be "retained and accumulated in the trust." Judge Sandstead found that "CSBT's receipt of percentage and of Extraordinary Fees [was] reasonable and appropriate under the circumstances. . . ." But we cannot discern from the record whether this finding included the $1,945, nor can we tell whether CSBT paid this amount from the income trust or another account. On remand, should the trial court conclude that the $1,945 was paid from the income trust, then Keenan is to be credited by this amount and CSBT debited for its attendant fees. V. Fees and Costs Finally, Keenan contests the award of attorney fees and costs that CSBT incurred in defending its accounting. We conclude that further findings are required, vacate the award, and remand. Following entry of Judge Sandstead's order and his retirement, CSBT presented separate motions to Judge Mulvahill for $198,126.80 in attorney fees and $19,339.24 in costs incurred defending its accounting. He adopted CSBT's orders verbatim[8] and awarded CSBT all of the fees and costs that it had requested. 
The order approving attorney fees recited that section 15-14-417(3) "entitles a conservator to reimbursement if defense of claims is in good faith and no breach of fiduciary duty is found"; that Judge Sandstead's order "found that [CSBT] acted in good faith and did not breach any fiduciary duty owed"; and that CSBT's attorney fees "are just and reasonable and a benefit to the Estate and Trust." The order approving CSBT's costs did not state any separate reasons. Initially, we acknowledge Keenan's assertion that "[t]he court granted the entirety of [CSBT's] claim [for fees and costs] without discussion or hearing." But he does not assert that he requested a hearing, nor do we find any such request in the record. Therefore, we conclude that he waived any right to a hearing on the issue. See In re Marriage of Ensminger, 209 P.3d 1163, 1167 (Colo.App. 2008). Keenan next asserts that Judge Mulvahill should have applied section 15-14-417(1) to CSBT's request because a conservator's actions must benefit the protected person. CSBT counters that section 15-14-417(3) was the correct standard because a fiduciary is entitled to reasonable attorney fees for successfully defending the propriety of its actions against a breach of fiduciary duty claim. Reviewing de novo whether the trial court applied the correct legal standard, see, e.g., Bonidy v. Vail Valley Center for Aesthetic Dentistry, P.C., 232 P.3d 277, 283 (Colo.App. 2010), we reject both positions. Keenan's assertion that section 15-14-417(1) governs because a conservator's actions must benefit the protected person is unpersuasive since CSBT was no longer Keenan's conservator when it defended its accounting, and Judge Sandstead had already addressed the propriety of its underlying conduct as conservator. CSBT's argument is equally unavailing. Section 15-14-417(3) precludes recovery of expenses by a fiduciary "who is unsuccessful in defending the propriety of his or her actions in a breach of fiduciary duty action. . . ." Even if CSBT is correct that by negative implication a successful fiduciary may recover fees, here CSBT was defending the accounting that Judge Sandstead ordered it to file. The accounting was not a separate "breach of fiduciary duty action" commenced by Keenan. *550 Rather, we conclude that Judge Mulvahill should have applied sections 15-14-417(2) and 15-14-417(4) for the following two reasons. First, as discussed above, section 15-14-417(2) lists several factors "to be considered as guides in determining the reasonableness of any fee referred to in this section or in this article" (emphasis added). See Kauntz v. HCA-Healthone, LLC, 174 P.3d 813, 817 (Colo.App.2007) ("`Any' means `all.'"). Therefore, on its face, section 15-14-417(2) applies to a claim for attorney fees that a conservator incurs, including fees in defense of its accounting, because such fees arise from protective proceedings under section 15-14-101, C.R.S.2010. Second, section 15-14-417(4) deals specifically with expenses "incurred in the defense of the fiduciary's fees and costs" when "any fiduciary is required to defend [them]." Even if such a proceeding might also fall within the general language of sections 15-14-417(1) or XX-XX-XXX(3), the more specific provision controls. See, e.g., Crandall, 238 P.3d at 662 n. 2. Here, CSBT filed its accounting of the fees and costs it spent in opposing Keenan's motions to terminate the conservatorship and to replace it as conservator. Keenan objected. 
In defending the accounting, it incurred further expenses, primarily attorney fees, which fall within the specific provisions of section 15-14-417(4). We are not persuaded otherwise by Keenan's assertion that section 15-14-417(4) is inapplicable because CSBT was not "required" to defend its underlying fees and costs. Specifically, he argues that CSBT "refused each and every opportunity to avoid expensive litigation" and that it is "well established that a missed opportunity to avoid trial constitutes a failure to mitigate and is a relevant factor in considering the award of attorney fees." This assertion raises two separate issues, one as to the applicability of section 15-14-417(4) and the other as to reasonableness under section 15-14-417(2). We disagree with Keenan as to the first issue and include the second issue in our remand order. Regardless of whether CSBT "refused . . . opportunit[ies] to avoid . . . litigation," Keenan objected to CSBT's final accounting, asserting that CSBT "actually spent [Keenan's] money to inflict financial, emotional, psychological, and physical harm." Therefore, CSBT's defense of its accounting was required. Cf. Butler v. Lembeck, 182 P.3d 1185, 1190 (Colo.App.2007) ("[H]omeowner was `required' to initiate legal action against tenants to obtain the relief she sought, because tenants denied that they were liable to her for the asserted damages."). As to the reasonableness of fees under section 15-14-417(2), failure to explore settlement is not a factor listed. While section 13-17-103(1)(h), C.R.S.2010, includes offers of settlement among considerations for determining the reasonableness of fees, Keenan cites no authority applying it to estate litigation or protective proceedings, nor are we aware of any. However, the enumerated factors under section 15-14-417(2) are not exhaustive, and section 15-14-417(4) does not limit the grounds for determining what is "equitable under the circumstances." Hence, the underlying factual issue may, but need not, be considered on remand.[9] Finally, Keenan asserts that even if section 15-14-417(4) applies, the amount CSBT spent defending its accounting was unreasonable and excessive, given: his alleged status as the "winning party"; the amount in controversy; and Judge Sandstead's $5000 surcharge against CSBT on the wheelchair/charger issue. Whether Keenan was the prevailing party for purposes of an attorney fees award is a discretionary decision for the trial court. See Anderson v. Pursell, 244 P.3d 1188, 1193-94 (Colo.2010) ("the trial court is in the best position to observe the course of the litigation and to determine which party ultimately prevailed"). The trial court should address this issue on remand, including findings on how Keenan's success in having CSBT replaced *551 as conservator affects reasonableness under section 15-14-417(2) or what is "equitable" under section 15-14-417(4). Section 15-14-417(2)(d) includes "[t]he amount involved and the results obtained" among the "reasonableness" factors. Because the parties dispute the amount of fees and costs CSBT incurred in the proceedings before it was replaced as conservator, the trial court should resolve this dispute on remand. The court should also address whether awarding CSBT all of the attorney fees requested is at odds with the surcharge on the wheelchair/charger issue. 
Factual underpinnings likewise favor remand to resolve Keenan's assertion that under section 15-14-417(4), equity lies with him because of the "relative financial position of the parties involved." While this factor—like settlement—appears in section 13-17-103, it finds no mention in section 15-14-417(2). However, because in an equity case a court has "broad and flexible discretionary powers," Flank Oil Co. v. Tennessee Gas Transmission Co., 141 Colo. 554, 568, 349 P.2d 1005, 1013 (1960), on remand the trial court may, but need not, consider this factor in determining what award is "equitable under the circumstances." Finally, we cannot accept the reference to "reasonable" and "just" in the court's order as sufficient because it does not mention either section 15-14-417(2) or section 15-14-417(4), nor does it address the factors in section 15-14-417(2), but rather cites only section 15-14-417(3). Therefore, we remand with instructions to make specific and independent findings whether: the fees sought are reasonable under the factors in section 15-14-417(2); awarding the fees and costs to CSBT is "equitable under the circumstances of the case," section 15-14-417(4); and ordering them to be paid from the conservatorship and trust is "just," id. See, e.g., Federal Ins. Co. v. Ferrellgas, Inc., 961 P.2d 511, 515 (Colo.App.1997) ("A party is . . . entitled to have the trial court make findings sufficient to disclose the basis for its decision to award costs and to support the amount awarded."); cf. Dahl v. Young, 862 P.2d 969, 973 (Colo. App.1993) (vacating and remanding award of attorney fees under section 38-35-109(3) for trial court to make specific findings of reasonableness). On remand, the court shall make specific findings and may, but need not, hold a further hearing. VI. Conclusion The orders are affirmed in part and vacated in part, and the case is remanded for further proceedings consistent with this opinion. Judge GRAHAM and Judge J. JONES concur. NOTES [1] Subsection (5) deals with the priority of paying costs and expenses after the death of an incapacitated or protected person. [2] The term "litigation" is broadly defined. See, e.g., In re R.H., 170 Cal.App.4th 678, 692, 88 Cal.Rptr.3d 650, 660 (2009) ("any civil action or proceeding"). And "estate" includes the property of any person "whose affairs are subject to this code." § 15-10-201(17), C.R.S.2010. [3] Because Keenan does not challenge this standard on the basis that "reasonably" and "in good faith" have different meanings, we leave for another day the case where a fiduciary's conduct satisfies one but not the other. We also decline to address Keenan's argument that the trial court could have exercised its discretionary power to appoint him a guardian ad litem under section 15-14-115, C.R.S.2010. Keenan does not explain why, in light of his pending motion to terminate the guardianship, appointing a guardian ad litem would have been a reasonable exercise of discretion. [4] Because we resolve this issue on common law and statutory grounds, we need not address Judge Sandstead's statement that CSBT also had the right to "seek a hearing to review the sufficiency of cause for removal" under article 11.03 of Keenan's disability trust. [5] Such a conflict could be avoided if a conservator were to "petition the appointing court for instructions concerning fiduciary responsibility" under section 15-14-414(2), C.R.S.2010, before opposing the protected person's motion. However, doing so is discretionary. See id. 
("[a] conservator may petition the . . . court") (emphasis added); Jefferson County Bd. of Equalization v. Gerganoff, 241 P.3d 932, 937 (Colo.2010) ("`may' denotes a grant of discretion"). [6] "The Bank's lawyer confided to the guardian's lawyer before the mediation that the Bank had already determined that it was unwilling to resign. The guardian's attorney responded, `Frankly, I wouldn't go out of my way to find anything to offer in mediation.'" [7] He also asserts that Grasee acted improperly by not personally contacting him for the five months preceding her resignation and that Judge Sandstead erred in not holding a hearing on his objection to releasing Grasee under a decree of discharge. But because Keenan does not identify any resulting damages, and the only apparent consequence of Grasee's alleged misconduct would be the propriety of CSBT's payments to her and her lawyer, on which Judge Sandstead did hold a hearing and which we address, we need not take up these assertions separately. [8] The proposed orders left blanks for the amount of fees, the date, and Judge Mulvahill's signature. Because the trial court made no independent findings, we scrutinize the orders "more critically than if they were produced by the trial court itself." Trask v. Nozisko, 134 P.3d 544, 549 (Colo.App.2006). [9] We express no opinion on the applicability of either CRE 408 or section 13-22-307, C.R.S. 2010, to Keenan's argument.
# Copyright (c): Wenyi Tang 2017-2019.
# Author: Wenyi Tang
# Email: wenyi.tang@intel.com
# Update Date: 2019 - 3 - 15

import logging

import torch
import torch.nn as nn

from .Ops.Blocks import EasyConv2d
from .Optim.SISR import L1Optimizer

_logger = logging.getLogger("VSR.DBPN")
_logger.info("LICENSE: DBPN is implemented by Haris. "
             "@alterzero https://github.com/alterzero/DBPN-Pytorch")


class UpBlock(torch.nn.Module):
    def __init__(self, num_filter, kernel_size=8, stride=4, activation='prelu'):
        super(UpBlock, self).__init__()
        self.up_conv1 = EasyConv2d(num_filter, num_filter, kernel_size, stride,
                                   activation=activation, transposed=True)
        self.up_conv2 = EasyConv2d(num_filter, num_filter, kernel_size, stride,
                                   activation=activation)
        self.up_conv3 = EasyConv2d(num_filter, num_filter, kernel_size, stride,
                                   activation=activation, transposed=True)

    def forward(self, x):
        h0 = self.up_conv1(x)
        l0 = self.up_conv2(h0)
        h1 = self.up_conv3(l0 - x)
        return h1 + h0


class DownBlock(torch.nn.Module):
    def __init__(self, num_filter, kernel_size=8, stride=4, activation='prelu'):
        super(DownBlock, self).__init__()
        self.down_conv1 = EasyConv2d(num_filter, num_filter, kernel_size, stride,
                                     activation=activation)
        self.down_conv2 = EasyConv2d(num_filter, num_filter, kernel_size, stride,
                                     activation=activation, transposed=True)
        self.down_conv3 = EasyConv2d(num_filter, num_filter, kernel_size, stride,
                                     activation=activation)

    def forward(self, x):
        l0 = self.down_conv1(x)
        h0 = self.down_conv2(l0)
        l1 = self.down_conv3(h0 - x)
        return l1 + l0


class D_UpBlock(torch.nn.Module):
    def __init__(self, num_filter, kernel_size=8, stride=4, num_stages=1,
                 activation='prelu'):
        super(D_UpBlock, self).__init__()
        self.conv = EasyConv2d(num_filter * num_stages, num_filter, 1,
                               activation=activation)
        self.up_conv1 = EasyConv2d(num_filter, num_filter, kernel_size, stride,
                                   activation=activation, transposed=True)
        self.up_conv2 = EasyConv2d(num_filter, num_filter, kernel_size, stride,
                                   activation=activation)
        self.up_conv3 = EasyConv2d(num_filter, num_filter, kernel_size, stride,
                                   activation=activation, transposed=True)

    def forward(self, x):
        x = self.conv(x)
        h0 = self.up_conv1(x)
        l0 = self.up_conv2(h0)
        h1 = self.up_conv3(l0 - x)
        return h1 + h0


class D_DownBlock(torch.nn.Module):
    def __init__(self, num_filter, kernel_size=8, stride=4, num_stages=1,
                 activation='prelu'):
        super(D_DownBlock, self).__init__()
        self.conv = EasyConv2d(num_filter * num_stages, num_filter, 1,
                               activation=activation)
        self.down_conv1 = EasyConv2d(num_filter, num_filter, kernel_size, stride,
                                     activation=activation)
        self.down_conv2 = EasyConv2d(num_filter, num_filter, kernel_size, stride,
                                     activation=activation, transposed=True)
        self.down_conv3 = EasyConv2d(num_filter, num_filter, kernel_size, stride,
                                     activation=activation)

    def forward(self, x):
        x = self.conv(x)
        l0 = self.down_conv1(x)
        h0 = self.down_conv2(l0)
        l1 = self.down_conv3(h0 - x)
        return l1 + l0


class Dbpn(nn.Module):
    def __init__(self, channels, scale, base_filter=64, feat=256, num_stages=7):
        super(Dbpn, self).__init__()
        kernel, stride = self.get_kernel_stride(scale)
        # Initial Feature Extraction
        self.feat0 = EasyConv2d(channels, feat, 3, activation='prelu')
        self.feat1 = EasyConv2d(feat, base_filter, 1, activation='prelu')
        # Back-projection stages
        self.up1 = UpBlock(base_filter, kernel, stride)
        self.down1 = DownBlock(base_filter, kernel, stride)
        self.up2 = UpBlock(base_filter, kernel, stride)
        for i in range(2, num_stages):
            self.__setattr__(f'down{i}',
                             D_DownBlock(base_filter, kernel, stride, i))
            self.__setattr__(f'up{i + 1}',
                             D_UpBlock(base_filter, kernel, stride, i))
        self.num_stages = num_stages
        # Reconstruction
        self.output_conv = EasyConv2d(num_stages * base_filter, channels, 3)

    def forward(self, x):
        x = self.feat0(x)
        x = self.feat1(x)
        h1 = self.up1(x)
        l1 = self.down1(h1)
        h2 = self.up2(l1)
        h = h2
        concat_h = h1
        concat_l = l1
        for i in range(2, self.num_stages):
            concat_h = torch.cat((h, concat_h), 1)
            l = self.__getattr__(f'down{i}')(concat_h)
            concat_l = torch.cat((l, concat_l), 1)
            h = self.__getattr__(f'up{i + 1}')(concat_l)
        concat_h = torch.cat((h, concat_h), 1)
        x = self.output_conv(concat_h)
        return x

    @staticmethod
    def get_kernel_stride(scale):
        if scale == 2:
            return 6, 2
        elif scale == 4:
            return 8, 4
        elif scale == 8:
            return 12, 8


class DBPN(L1Optimizer):
    def __init__(self, channel, scale, base_filter=64, feat=256, num_stages=7,
                 **kwargs):
        self.body = Dbpn(channel, scale, base_filter, feat, num_stages)
        super(DBPN, self).__init__(scale, channel, **kwargs)

    def fn(self, x):
        return self.body(x)
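# Minimal usage sketch (not part of the original file). It assumes the
# surrounding VSR package is importable, so that Dbpn and its EasyConv2d
# dependency resolve, and that EasyConv2d pads its (transposed) convolutions
# so that each stride-4 layer changes resolution by exactly a factor of 4.
# Shapes below are illustrative only.
#
# import torch
# net = Dbpn(channels=3, scale=4, base_filter=64, feat=256, num_stages=7)
# lr = torch.rand(1, 3, 32, 32)      # fake low-resolution RGB patch
# with torch.no_grad():
#     sr = net(lr)                   # 7-stage up/down back-projection
# print(sr.shape)                    # expect roughly (1, 3, 128, 128)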
i was surfing, with another guy, at County Line (across from Neptune's Net), when it came out of the fog bank just offshore. the stranger and i were focused on the horizon, awaiting another wave. we didn't know each other, but it's wise to buddy-up when you're in the drink, so there we were, silent, sitting on boards, acknowledging the expectation that if either one would have a problem, the other'd be close by. it was a good morning of waves, coming in sets about 7-10 minutes apart. between the sets, at exactly 12:20 PDT, what looked to me like a brushed aluminum saucer with a bump in the middle (both top and bottom alike), approached the shoreline from out of the fog bank sitting about a mile offshore. it was the size of a US nickel held at arm's length, and stopped its lateral approach to the shoreline just inside the fog bank, where it was clear and sunny. i couldn't discern any kind of external rotation, but looking back in my mind, i believe perhaps the 'bumps' were rotating, although i couldn't say why. maybe they had black slits or something (which would be subtle when moving, like a zoetrope or something). so it comes in to the sun, settles (at about 300 feet over the ocean surface), and begins an oscillating descent, much like a flat rock in water, or a leaf towards terra. side to side it drops, for about 3 times. on the 4th, it slowed, tilted towards the shore (us), tilted away, and then, in a blink of an eye, took off at a 60 degree climb into and through the fog bank, out to sea. this happened in the space of about 10 seconds, so we had time to process the event as it happened. the non-ballistic demonstration reeked of electromagnetic drive (no sound, super-fast, zero-G). we looked over at each other. "You see that?" "Yep." "((deleted))." silence returned, as we realized deep in the core of our consciousness, that life was deeper than the ocean we surf. we didn't talk any more, transcended by the experience, peaceful that the answer was a resounding 'yes.' the next set of waves passed by while we both kept staring out at the fog bank, boards pointed towards the open ocean, the sky into which the ! 'craft' had disappeared.
Isotopes of darmstadtium

Darmstadtium (110Ds) is a synthetic element, and thus a standard atomic weight cannot be given. Like all synthetic elements, it has no stable isotopes. The first isotope to be synthesized was 269Ds in 1994. There are 9 known radioisotopes from 267Ds to 281Ds (with many gaps) and 2 or 3 known isomers. The longest-lived isotope is 281Ds with a half-life of 9.6 seconds.

List of isotopes

(For the isomers 270mDs and 271mDs, the excitation energy is given in place of the isotopic mass.)

Nuclide | Z   | N   | Isotopic mass (u) | Half-life                     | Decay mode        | Daughter isotope | Spin/parity
267Ds   | 110 | 157 | 267.14377(15)#    | 3(+6−2) µs                    | α ?               | 263Hs ?          | 9/2+#
269Ds   | 110 | 159 | 269.14475(3)      | 230(110) µs [179(+245−66) µs] | α                 | 265Hs            | 3/2+#
270Ds   | 110 | 160 | 270.14458(5)      | 160(100) µs [0.10(+14−4) ms]  | α                 | 266Hs            | 0+
270mDs  | 110 | 160 | 1140(70) keV      | 10(6) ms [6.0(+82−22) ms]     | α                 | 266Hs            | (10)(−#)
271Ds   | 110 | 161 | 271.14595(10)#    | 210(170) ms                   | α                 | 267Hs            | 11/2−#
271mDs  | 110 | 161 | 29(29) keV        | 1.3(5) ms                     | α                 | 267Hs            | 9/2+#
273Ds   | 110 | 163 | 273.14856(14)#    | 0.17(+17−6) ms                | α                 | 269Hs            | 13/2−#
277Ds   | 110 | 167 | 277.15591(41)#    | 3.5 ms                        | α                 | 273Hs            | 11/2+#
279Ds   | 110 | 169 | 279.16010(64)#    | 0.18(+5−3) s                  | SF (90%); α (10%) | (various); 275Hs |
280Ds   | 110 | 170 | 280.16131(89)#    | 6.7 ms                        | SF                | (various)        | 0+
281Ds   | 110 | 171 | 281.16451(59)#    | 9.6 s                         | SF (94%); α (6%)  | (various); 277Hs | 3/2+#

Isotopes and nuclear properties

Nucleosynthesis

Super-heavy elements such as darmstadtium are produced by bombarding lighter elements in particle accelerators so as to induce fusion reactions. Whereas most of the isotopes of darmstadtium can be synthesized directly this way, some heavier ones have so far been observed only as decay products of elements with higher atomic numbers. Depending on the energies involved, the fusion reactions used for direct synthesis are separated into "hot" and "cold". In hot fusion reactions, very light, high-energy projectiles are accelerated toward very heavy targets (actinides), giving rise to compound nuclei at high excitation energy (~40–50 MeV) that may either fission or evaporate several (3 to 5) neutrons. In cold fusion reactions, the produced fused nuclei have a relatively low excitation energy (~10–20 MeV), which decreases the probability that these products will undergo fission reactions. As the fused nuclei cool to the ground state, they require emission of only one or two neutrons, and thus allow for the generation of more neutron-rich products. The latter is a distinct concept from the claim that nuclear fusion can be achieved at room-temperature conditions (see cold fusion). The table below contains various combinations of targets and projectiles which could be used to form compound nuclei with Z=110.

Cold fusion

Before the first successful synthesis of darmstadtium in 1994 by the GSI team, scientists at GSI also tried to synthesize darmstadtium by bombarding lead-208 with nickel-64 in 1985.
No darmstadtium atoms were identified. After an upgrade of their facilities, the team at GSI successfully detected 9 atoms of 271Ds in two runs of their discovery experiment in 1994. This reaction was successfully repeated in 2000 by GSI (4 atoms), in 2000 and 2004 by the Lawrence Berkeley National Laboratory (LBNL) (9 atoms in total), and in 2002 by RIKEN (14 atoms). The GSI team studied the analogous reaction with nickel-62 instead of nickel-64 in 1994 as part of their discovery experiment. Three atoms of 269Ds were detected. A fourth decay chain was measured but was subsequently retracted.

In addition to the official discovery reactions, in October–November 2000, the team at GSI also studied the analogous reaction using a lead-207 target in order to synthesize the new isotope 270Ds. They succeeded in synthesizing 8 atoms of 270Ds, relating to a ground-state isomer, 270Ds, and a high-spin metastable state, 270mDs.

In 1986, a team at the Joint Institute for Nuclear Research (JINR) in Dubna, Russia, studied the reaction:

209Bi + 59Co → 267Ds + n

They were unable to detect any darmstadtium atoms. In 1995, the team at LBNL reported that they had succeeded in detecting a single atom of 267Ds using this reaction. However, several decays were not measured and further research is required to confirm this discovery.

Hot fusion

In 1986, the GSI team attempted to synthesize element 110 by bombarding a uranium-235 target with accelerated argon-40 ions. No atoms were detected. In September 1994, the team at Dubna detected a single atom of 273Ds by bombarding a plutonium-244 target with accelerated sulfur-34 ions.

Experiments have been performed in 2004 at the Flerov Laboratory of Nuclear Reactions in Dubna studying the fission characteristics of the compound nucleus 280Ds, produced through the nuclear reaction:

232Th + 48Ca → 280Ds* → fission

The result revealed how compound nuclei such as this fission predominantly by expelling magic and doubly magic nuclei such as 132Sn (Z=50, N=82). No darmstadtium atoms were obtained. A compound nucleus is a loose combination of nucleons that have not yet arranged themselves into nuclear shells. It has no internal structure and is held together only by the collision forces between the target and projectile nuclei. It is estimated that it requires around 10⁻¹⁴ s for the nucleons to arrange themselves into nuclear shells, at which point the compound nucleus becomes a nuclide, and this number is used by IUPAC as the minimum half-life a claimed isotope must have to potentially be recognised as being discovered.

As decay product

Darmstadtium has been observed as a decay product of copernicium. Copernicium currently has seven known isotopes, four of which have been shown to undergo alpha decay to become darmstadtium nuclei, with mass numbers between 273 and 281. Darmstadtium isotopes with mass numbers 277, 279, and 281 have to date been produced only by the decay of copernicium nuclei. Parent copernicium nuclei can themselves be decay products of flerovium or livermorium. Darmstadtium may also have been produced in the electron capture decay of roentgenium nuclei which are themselves daughters of nihonium, moscovium, or tennessine. To date, no other elements have been known to decay to darmstadtium.
For example, in 2004, the Dubna team (JINR) identified darmstadtium-281 as a product in the decay of livermorium via an alpha decay sequence:

293Lv → 289Fl + α
289Fl → 285Cn + α
285Cn → 281Ds + α

Retracted isotopes

280Ds

The first synthesis of element 114 resulted in two atoms assigned to 288Fl, decaying to 280Ds, which underwent spontaneous fission. The assignment was later changed to 289Fl and the darmstadtium isotope to 281Ds. Hence, 280Ds remained unknown until 2016, when it was populated by the hitherto unknown alpha decay of 284Cn (previously, that nucleus was only known to undergo spontaneous fission).

277Ds

In the claimed synthesis of 293Og in 1999, the isotope 277Ds was identified as decaying by 10.18 MeV alpha emission with a half-life of 3.0 ms. This claim was retracted in 2001. This isotope was finally created in 2010, and its decay data indicated that the previously reported data had been fabricated.

273mDs

In the synthesis of 277Cn in 1996 by GSI (see copernicium), one decay chain proceeded via 273Ds, which decayed by emission of a 9.73 MeV alpha particle with a lifetime of 170 ms. This would have been assigned to an isomeric level. This data could not be confirmed, and thus this isotope is currently unknown or unconfirmed.

272Ds

In the first attempt to synthesize darmstadtium, a 10 ms SF activity was assigned to 272Ds in the reaction 232Th(44Ca,4n). Given current understanding regarding stability, this isotope has been retracted from the table of isotopes.

Nuclear isomerism

281Ds

The production of 281Ds by the decay of 289Fl or 293Lv has produced two very different decay modes. The most common and readily confirmed mode is spontaneous fission with a half-life of 11 s. A much rarer and as yet unconfirmed mode is alpha decay by emission of an alpha particle with energy 8.77 MeV, with an observed half-life of around 3.7 min. This decay is associated with a unique decay pathway from the parent nuclides and must be assigned to an isomeric level. The half-life suggests that it must be assigned to an isomeric state, but further research is required to confirm these reports. It was suggested in 2016 that this unknown activity might be due to 282Mt, the great-granddaughter of 290Fl via electron capture and two consecutive alpha decays.

271Ds

Decay data from the direct synthesis of 271Ds clearly indicates the presence of two nuclear isomers. The first emits alpha particles with energies 10.74 and 10.69 MeV and has a half-life of 1.63 ms. The other emits only alpha particles with an energy of 10.71 MeV and has a half-life of 69 ms. The first has been assigned to the ground state and the latter to an isomeric level. It has been suggested that the closeness of the alpha decay energies indicates that the isomeric level may decay primarily by delayed isomeric transition to the ground state, resulting in an identical measured alpha energy and a combined half-life for the two processes.

270Ds

The direct production of 270Ds has clearly identified two nuclear isomers. The ground state decays by alpha emission into the ground state of 266Hs by emitting an alpha particle with energy 11.03 MeV and has a half-life of 0.10 ms. The metastable state decays by alpha emission, emitting alpha particles with energies of 12.15, 11.15, and 10.95 MeV, and has a half-life of 6 ms. When the metastable state emits an alpha particle of energy 12.15 MeV, it decays into the ground state of 266Hs, indicating that it has 1.12 MeV of excess energy.
Chemical yields of isotopes

Cold fusion

The table below provides cross-sections and excitation energies for cold fusion reactions producing darmstadtium isotopes directly. Data in bold represent maxima derived from excitation function measurements. + represents an observed exit channel.

Fission of compound nuclei with Z=110

Experiments have been performed in 2004 at the Flerov Laboratory of Nuclear Reactions in Dubna studying the fission characteristics of the compound nucleus 280Ds. The nuclear reaction used is 232Th+48Ca. The result revealed how nuclei such as this fission predominantly by expelling closed-shell nuclei such as 132Sn (Z=50, N=82).

Theoretical calculations

Decay characteristics

Theoretical calculations in a quantum tunneling model reproduce the experimental alpha-decay half-life data. The model also predicts that the isotope 294Ds would have an alpha-decay half-life on the order of 311 years.

Evaporation residue cross sections

The table below contains various target–projectile combinations for which calculations have provided estimates of cross-section yields from various neutron evaporation channels. The channel with the highest expected yield is given. DNS = di-nuclear system; σ = cross section.
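As a quick arithmetic cross-check of the compound-nucleus assignments quoted above, the proton and mass numbers of target and projectile simply add, and each evaporated neutron removes one mass unit. The short sketch below is purely illustrative; the reactions and product isotopes are the ones named in the text, and the neutron counts follow from the stated mass numbers.

# Illustrative Z/A bookkeeping for fusion reactions mentioned in this article.
# Each entry: (label, target (Z, A), projectile (Z, A), evaporated neutrons).
reactions = [
    ("208Pb + 64Ni (cold fusion)", (82, 208), (28, 64), 1),   # -> 271Ds
    ("232Th + 48Ca (fission study)", (90, 232), (20, 48), 0), # -> 280Ds*
    ("244Pu + 34S (hot fusion)", (94, 244), (16, 34), 5),     # -> 273Ds
]

for label, (zt, at), (zp, ap), xn in reactions:
    z, a = zt + zp, at + ap            # compound nucleus Z and A
    print(f"{label}: compound nucleus Z={z}, A={a}; after {xn}n -> A={a - xn}")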
UNPUBLISHED UNITED STATES COURT OF APPEALS FOR THE FOURTH CIRCUIT No. 11-4857 UNITED STATES OF AMERICA, Plaintiff – Appellee, v. TERRY WIGGS, Defendant - Appellant. Appeal from the United States District Court for the Northern District of West Virginia, at Clarksburg. Irene M. Keeley, District Judge. (1:07-cr-00048-IMK-JSK-1) Submitted: February 16, 2012 Decided: February 21, 2012 Before SHEDD, KEENAN, and WYNN, Circuit Judges. Affirmed by unpublished per curiam opinion. Brian J. Kornbrath, Federal Public Defender, Clarksburg, West Virginia; Kristen Leddy, Research and Writing Specialist, Martinsburg, West Virginia, for Appellant. William J. Ihlenfeld II, United States Attorney, Shawn Angus Morgan, Assistant United States Attorney, Clarksburg, West Virginia, for Appellee. Unpublished opinions are not binding precedent in this circuit. PER CURIAM: Terry Lee Wiggs appeals from his twenty-four-month sentence imposed upon revocation of his supervised release. On appeal, Wiggs alleges that his sentence was plainly unreasonable. We affirm. A district court has broad discretion to impose a sentence upon revoking a defendant’s supervised release. United States v. Thompson, 595 F.3d 544, 547 (4th Cir. 2010). We will affirm a sentence imposed after revocation of supervised release if it is within the applicable statutory maximum and is not “plainly unreasonable.” United States v. Crudup, 461 F.3d 433, 439-40 (4th Cir. 2006). In determining whether a revocation sentence is plainly unreasonable, we first assess the sentence for reasonableness, “follow[ing] generally the procedural and substantive considerations that we employ in our review of original sentences.” Id. at 438. A supervised release revocation sentence is procedurally reasonable if the district court considered the Sentencing Guidelines’ Chapter 7 advisory policy statements and the 18 U.S.C. § 3553(a) (2006) factors that it is permitted to consider in a supervised release revocation case. See Crudup, 461 F.3d at 439. Although the court need not explain the reasons for imposing a revocation sentence in as much detail as when it imposes an original sentence, “it still must provide a statement of reasons for the 2 sentence imposed.” Thompson, 595 F.3d at 547 (internal quotation marks omitted). A revocation sentence is substantively reasonable if the district court stated a proper basis for concluding the defendant should receive the sentence imposed, up to the statutory maximum. Crudup, 461 F.3d at 440. Only if a sentence is found procedurally or substantively unreasonable will we “then decide whether the sentence is plainly unreasonable.” Id. at 439. After review of the record, we conclude that the revocation sentence is not plainly unreasonable. The twenty-four month prison term does not exceed the applicable maximum allowed by statute. The district court considered the argument of Wiggs’ counsel, the Guidelines advisory range, the recommendation of the Government, and relevant § 3553(a) factors, addressing on the record Wiggs’ history and characteristics, the nature and circumstances of his violative behavior, the need for the sentence to deter Wiggs, and Wiggs’ breach of trust following prior lenient treatment. See 18 U.S.C. § 3553(a)(1), (a)(2)(B)-(C); U.S. Sentencing Guidelines Manual Ch. 7, Pt. A, introductory cmt. 3(b) (2010). The district court adequately explained its rationale for imposing sentence, and the reasons relied upon are proper bases for the sentence imposed. 
Accordingly, we conclude that Wiggs’ sentence was reasonable, and we affirm the district court’s order imposing the twenty-four-month prison sentence. We dispense with oral argument because the facts and legal contentions are adequately presented in the materials before the court and argument would not aid the decisional process. AFFIRMED
The ability to elucidate the primary structure of extremely small (picomole) amounts of biologically relevant proteins has had a profound impact on the biological and medical sciences. This capability, due to the recent development of protein microsequencing and amino acid analysis technologies, has extended our knowledge of the structure and function of normal and abnormal proteins in health and disease. Furthermore, with the recent advances in the fields of recombinant DNA and genetic engineering, protein microsequencing is often a mandatory first step for the design and synthesis of specific DNA probes needed to study gene structure and function. After probes are synthesized and used to identify genes, both sequencing of the DNA and synthesis of peptides and additional DNA sequences from regions of the DNA sequence are necessary. These peptides are then used as immunological probes. This proposal is to place a state-of-the-art, research-grade protein and DNA sequencing system, along with protein synthesis capability, into this hospital environment for use in a variety of basic research projects relating to biopolymers. Potential uses for such biopolymer equipment in non-clinical research include the study of: (1) the molecular pathology of Alzheimer's disease and other degenerative diseases of the nervous system; (2) the isolation of the gene for human ferrochelatase; (3) post-translational modification of proteins; (4) molecular biology of human myeloid differentiation; (5) pulmonary surfactant proteins; (6) ionizing radiation-induced DNA damage and repair; (7) purification of the Type II thyroxine 5' deiodinase and cloning of its gene; (8) site-specific mutagenesis of von Willebrand's factor; (9) biochemistry, molecular biology and biological action of eosinophil and basophil proteins; (10) molecular biology of the polio virus receptor. The Biopolymer Lab will be placed in a laboratory dedicated to synthesis and structure studies and will become a central feature of the research efforts of many investigators at the hospital; it will greatly enhance our ability to make progress in these several areas.
Crystallization and preliminary crystallographic analysis of the Ras binding domain of RalGDS, a guanine nucleotide dissociation stimulator of the Ral protein. The RalGDS is a guanine nucleotide dissociation stimulator which activates the Ral protein, a Ras-like small GTPase. The C-terminal domain of the RalGDS (C-RalGDS) binds tightly to the effector loop of Ras, suggesting that the RalGDS may be a crossing point of two signal transduction pathways associated with the Ras and Ral proteins. C-RalGDS has been purified and crystallized in space group C2, with unit-cell dimensions a = 108.8, b = 30.7, c = 51.3 A, beta = 91.7 degrees at 277 K and a = 103.8, b = 30.55, c = 51.4 A, beta = 94.9 degrees for data collected at 100 K. The crystals diffract to 1.8 A at a synchrotron radiation source. To use the multiple-wavelength anomalous diffraction method for phasing, a selenomethionine derivative of the protein has also been crystallized.
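As a quick, purely illustrative sanity check of the two reported monoclinic cells, the cell volume V = a·b·c·sin(β) can be computed directly from the numbers in the abstract; the short sketch below does only that, with no further assumptions.

# Monoclinic cell volume V = a * b * c * sin(beta) for the two data sets above.
import math

cells = {
    "277 K": (108.8, 30.7, 51.3, 91.7),
    "100 K": (103.8, 30.55, 51.4, 94.9),
}
for label, (a, b, c, beta) in cells.items():
    v = a * b * c * math.sin(math.radians(beta))
    print(f"{label}: V ~ {v:.0f} A^3")   # roughly 1.6-1.7e5 cubic angstroms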
Description Oh Henry! is a candy bar containing peanuts, caramel, and fudge coated in chocolate. These ingredients deliver great taste and plenty of energy for those who enjoy it. Packed in a size that lets you take it anywhere and eat it anywhere. Four bars per package. Visit Nestle for more options.
--- abstract: 'Crosstalk, also known by its Chinese name [*xiangsheng*]{}, is a traditional Chinese comedic performing art featuring jokes and funny dialogues, and one of China’s most popular cultural elements. It is typically in the form of a dialogue between two performers for the purpose of bringing laughter to the audience, with one person acting as the leading comedian and the other as the supporting role. Though general dialogue generation has been widely explored in previous studies, it is unknown whether such entertaining dialogues can be automatically generated or not. In this paper, we for the first time investigate the possibility of automatic generation of entertaining dialogues in Chinese crosstalks. Given the utterance of the leading comedian in each dialogue, our task aims to generate the replying utterance of the supporting role. We propose a humor-enhanced translation model to address this task and human evaluation results demonstrate the efficacy of our proposed model. The feasibility of automatic entertaining dialogue generation is also verified.' author: - | Shikang Du, Xiaojun Wan, Yajie Ye\ Institute of Computer Science and Technology, The MOE Key Laboratory of Computational Linguistics\ Peking University, Beijing 100871, China\ {dusk, wanxiaojun, yeyajie}@pku.edu.cn\ bibliography: - 'aaai18.bib' title: Towards Automatic Generation of Entertaining Dialogues in Chinese Crosstalks --- [UTF8]{}[gbsn]{} Introduction ============ Crosstalk, also known by its Chinese name *相声/xiangsheng*, is a traditional Chinese comedic performing art, and one of China’s most popular cultural elements. It is typically in the form of a dialogue between two performers, but much less often can also be a monologue by a solo performer, or even less frequently, a group act by multiple performers. The crosstalk language, rich in puns and allusions, is delivered in a rapid, bantering style. The purpose of Xiangsheng is to bring laughter to the audience, and the crosstalk language features humorous dialogues [@link1979genie; @moser1990reflexivity; @terence2013china; @mackerras2013performing]. The language style of crosstalk is just like chatting or gossip, but is more funny and humorous, especially in crosstalks given by two performers. It would be an ideal resource for studying humor in dialogue system. However, there are some special rules in crosstalks. For the crosstalk between two performers, one person acts as the leading comedian (or [*逗哏/dougen*]{} in Chinese) and the other as the supporting role (or [*捧哏/penggen*]{}). The two performers usually stand before an audience and deliver their lines in rapid fire by turn. They echo each other in the crosstalk performance. In each turn, the leading role usually tells stories and jokes, or does some sound imitation in his utterance, and the supporting role points out the humorous point in the leading role’s performance, or even adds fuel to the leading role’s performance, making it funnier. For example, -------- ------------------------------------------------- **A**: 楚国大夫屈原,五月初五死的,我们 应该永远怀念屈原。要是没有屈原, 我们怎么能有这三天假期呢? The mid-autumn festival is in memory of Qu Yuan. We should keep him in mind forever, because his death brings us this 3-day holiday. **B**: 这个,代价大点儿。 It costs him a lot (to have a holiday). **A**: 我觉得应该再多放几天假。 I think it would be better with more holidays. **B**: 那得死多少人啊。 How many people would die then! -------- ------------------------------------------------- In this example, B acts as the supporting role. 
His last response unexpectedly links the number of holidays with the number of people who died, which makes the whole dialogue funnier. But in many cases, the supporting role acts as a go-between, giving positive responses (such as “当然/Of course” or “这样/That’s why”) or negative responses (such as “啊?/Ah?”), and sometimes repeating key points in the leading role’s utterance, making the narration given by the leading role go smoothly (e.g. A: 虽然道路崎岖,所幸还有蒙蒙月色/ Although the road is rough, the moonlight is bright. B:还能看见点/ We can still see things on the road.) In brief, the crosstalk between two performers can be considered a special and challenging dialogue form: the entertaining dialogue. Though general dialogue generation has been widely explored and has achieved great success in previous studies [@li-EtAl:2016:EMNLP20162; @sordoni-EtAl:2015:NAACL-HLT; @ritter2011data], it is unknown whether such entertaining dialogues can be automatically generated or not. If computers can generate entertaining dialogues well, the AI ability of computers will be further validated. The function of generating entertaining dialogues is also very useful in many interactive products, making them more appealing. In this study, we for the first time investigate the possibility of automatic generation of entertaining dialogues in Chinese crosstalks. Given the utterance of the leading comedian in each dialogue, our task aims to generate the replying words of the supporting role. We propose a humor-enhanced translation model to address this special and challenging task, and the model explicitly leverages a sub-model to measure the humorous characteristic of a dialogue. Human evaluation results on a real Chinese crosstalk dataset demonstrate the efficacy of our proposed model, which can outperform several retrieval based and generation based baselines. The feasibility of automatic entertaining dialogue generation is also verified. The contributions of this paper are summarized as follows: 1\) We are the first to investigate the new task of entertaining dialogue generation in Chinese crosstalks. 2\) We propose a humor-enhanced translation model to address this challenging task by making use of a sub-model to measure the humorous characteristic of a dialogue. 3\) Manual evaluation is performed to verify the efficacy of our proposed model and the feasibility of automatic entertaining dialogue generation. In the rest of this paper, we will first describe the details of our proposed model and then present and discuss the evaluation results. After that, we introduce the related work. Lastly, we conclude this paper. Our Generation Method ===================== Given an utterance $\mathbf{s}$ of the leading role (i.e. *dougen*) in Chinese crosstalks, our task aims to generate the replying utterance $\mathbf{r}$ of the supporting role (i.e. [*penggen*]{}), which is called crosstalk response generation (CRG). The generated utterance needs to be fluent and related to the leading role’s utterance. Moreover, it is also expected that the generated utterance can make the dialogue funnier and more entertaining. As mentioned earlier, our task is a special form of dialogue generation. In recent years, many methods have been proposed for dialogue generation based on large sets of training data, including deep learning methods (especially sequence-to-sequence models) [@li-EtAl:2016:EMNLP20162].
However, deep learning methods usually require a large training set to achieve good performance in dialogue generation tasks, and such a training set is hard to obtain for our task. So, we choose a more traditional but effective way based on machine translation to address the new task of crosstalk response generation. *penggen* often comments on *dougen*’s utterance, and sometimes even retells *dougen*’s words in a more humorous way. We believe that *penggen*’s response follows some latent patterns given the utterance of *dougen*, and we treat response generation as a monolingual translation problem, in which the given input (the utterance of *dougen*) is treated as the source language and the humorous response as the target language. Machine translation (MT) has already been successfully used in response generation [@ritter2011data], in which the input post is treated as a sequence of words and word or phrase based translation is performed to generate another sequence of words as the response. If we simply treated crosstalk response generation as a general dialogue generation problem, we could apply a statistical machine translation (SMT) model [@koehn2003statistical] to generate responses accordingly, ignoring the entertaining characteristic of crosstalk. In machine translation, beam search is used in the decoding process, which generates multiple scored candidates. Usually only the candidate with the highest score is accepted. These scores reflect the similarity between a candidate and the reference. However, just as one question may have many different answers, there may still be acceptable, or even unexpectedly wonderful, candidates with lower scores. It is a pity to have these good responses ignored just because they share little similarity with the references in a limited training dataset. To exploit them, and also to address the crosstalk generation problem, we propose a humor-enhanced machine translation model to generate response utterances in crosstalk. Our proposed model leverages a sub-model to explicitly model the degree of humor of a dialogue, and integrates it with other sub-models, as illustrated in **Figure \[fig:sys\]**. ![\[fig:sys\] General architecture of our system](xiangsheng_high_level.pdf){width="80mm"} Response Generation Model ------------------------- We get pairs of aligned utterance and response from the dialogue fragments in Chinese crosstalks, which are considered monolingual parallel data. The two performers echo each other in a crosstalk, their roles remain consistent throughout the whole crosstalk, and the leading role and the supporting role of each utterance can be easily identified. Then we segment the utterances into words. Each pair consists of a sequence of words $\mathbf{s} (\{s_1, s_2, ..., s_l\})$ spoken by the leading role, and a sequence of words $\mathbf{ref}$ replied by the supporting role, while the response we generate is denoted as $\mathbf{r} (\{r_1, r_2, ..., r_l\})$. Given the leading role’s utterance $\mathbf{s}$, we aim to generate the best response utterance $\mathbf{r}$ by using our proposed generation model. The proposed generation model has three sub-models (M1, M2, M3): a translation model, a language model and a humor model. We will introduce each sub-model and then introduce the framework of model combination. ### Translation Model (M1) The translation model translates the given leading role’s utterance $\mathbf{s}$ into a sequence of words $\mathbf{r}$, which is treated as the response.
Let $({s}_i, {r}_i)$ be a pair of translation units; we can compute the word translation probability distribution $\phi_{tm}({s}_i, {r}_i)$, which is defined in [@koehn2003statistical]. Each word $s_i$ in the input utterance is translated to a word $r_i$ in the response, and the words in the response may be reordered. Reordering of the generated response is modeled by a relative distortion probability distribution $d(a_i - b_{i-1})$, where $a_i$ is the starting position of the word in the input utterance $\mathbf{s}$ translated to the $i$-th word in the generated response $\mathbf{r}$, and $b_{i-1}$ denotes the end position of the word in the input utterance translated into the $(i-1)$-th word in the response. We use $d(x)=\alpha^{|x-1|}$ as the implementation. Thus, the translation score between the leading role’s utterance $\mathbf{s}$ and the generated response $\mathbf{r}$ is: $$p_{tm}\left( \mathbf{r}, \mathbf{s} \right) = \prod_{i=1}^{l}\phi_{tm}({s}_i, {r}_i)\, d(a_i - b_{i-1})$$ ### Language Model (M2) We use a 4-gram language model in this work. The language model based score is computed as: $$p_{lm}({\mathbf r}) = \prod_j p(r_j|r_{j-3}r_{j-2}r_{j-1})$$ where $r_j$ is the $j$-th element of $\mathbf r$. ### Humor Model (M3) We want to build a model to measure the degree of humor of a dialogue. However, humor is very complex. In Chinese crosstalks, humor can be expressed by the actors’ tone, body language and verbal language. In this study, we mainly focus on modeling the verbally expressed humor in crosstalks. We build a classifier to determine the probability of being humorous for each response candidate in the context of the input utterance. In this model, we evaluate humor in 4 dimensions, the same as in [@yang2015humor]: (a) Incongruity, (b) Ambiguity, (c) Interpersonal Effect, and (d) Phonetic Style. Incongruity structure plays an important role in verbal humor, as stated in [@lefcourt2001humor] and [@paulos2008mathematics]. Although it is hard to determine incongruity, it is relatively easier to calculate the semantic disconnection in a sentence. We use Word2vec to derive the word embeddings and then compute the distances between word vectors. When a listener expects one meaning, but is forced to use another meaning [@yang2015humor], there is ambiguity. This distraction often makes people laugh. To measure the ambiguity in a sentence, we collect a number of antonyms and synonyms for feature extraction. Note that antonyms are used as an important feature in humor detection in [@mihalcea2005making]. Using Chinese WordNet [@huang2010infrastructure], we get the pairs of antonyms and synonyms. Interpersonal effect is associated with sentimental effect [@zhang2014recognizing]. A word with sentimental polarity reflects the emotion expressed by the writer. We use a dictionary in [@Xu2008] to compute the sentimental polarity of each word, and add the values up as the overall sentimental polarity of a sentence. Many humorous texts play with sounds, creating incongruous sounds or words. Homophonic words have more potential to be phonetically funny. We count the number of homophonic words and words with the same rhyme, with the help of pypinyin[^1]. Furthermore, adult slang is described in [@mihalcea2005making] as a key feature for recognizing jokes, so we also count the number of slang terms.
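As a concrete illustration of two of these features, the sketch below computes the word-embedding distance statistics used for incongruity and the homophone count used for the phonetic-style feature. This is only a minimal sketch under our reading of the description above, not the authors' released code: the embedding lookup is passed in as a plain word-to-vector dictionary, and the helper names are illustrative assumptions.

```python
# A minimal sketch (not the authors' code) of two humor-model features:
# semantic disconnection via word-embedding distances (incongruity) and
# the count of homophonic word pairs (phonetic style) using pypinyin.
import itertools
import numpy as np
from pypinyin import lazy_pinyin

def embedding_distance_features(words, embeddings):
    """Min/max cosine distance over all pairs of words that have vectors."""
    vecs = [np.asarray(embeddings[w], dtype=float) for w in words if w in embeddings]
    dists = [1.0 - np.dot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v) + 1e-8)
             for u, v in itertools.combinations(vecs, 2)]
    return (min(dists), max(dists)) if dists else (0.0, 0.0)

def homophone_pair_count(words):
    """Count pairs of words whose toneless pinyin readings are identical."""
    readings = ["".join(lazy_pinyin(w)) for w in words]
    return sum(1 for a, b in itertools.combinations(readings, 2) if a == b)

# Example: 星星 (star) and 猩猩 (gorilla) share the reading "xingxing",
# so the phonetic feature fires on this toy segmented turn of dialogue.
print(homophone_pair_count(["星星", "猩猩", "月亮"]))  # prints 1
```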
Note that we extract features from the response alone and also extract features from the whole turn of dialogue consisting of both the given input utterance and the response. To summarize, the features we use are listed below: - minimum and maximum distances of each pair of word vectors in the response; - minimum and maximum distances of each pair of word vectors in the whole turn of dialogue (including the given input utterance and the response); - number of pairs of antonyms in the response; - number of pairs of antonyms in the whole turn of dialogue; - number of pairs of synonyms in the response; - number of pairs of synonyms in the whole turn of dialogue; - sentimental polarity of the response; - sentimental polarity of the whole turn of dialogue; - number of homophonic words in the response; - number of homophonic words in the whole turn of dialogue; - number of words with the same rhyme in the response; - number of words with the same rhyme in the whole turn of dialogue; - number of slang terms in the response. We choose the random forest classifier [@Liaw2002] because it generally outperforms other classifiers based on our empirical analysis. The output probability for $\mathbf{r}$ is used as the humor model score $p_{hm}({\mathbf r})$. ### Model Combination We use a log-linear framework to combine the above three sub-models and get our response generation model. Note that the translation model contributes two parts (the word translation term and the distortion term). $$\begin{split} p({\mathbf r}|{\mathbf s}) &= \lambda_{tm} \sum_i \log \phi_{tm}(s_i, r_i)\\ &+ \lambda_{ds} \sum_i \log d(a_i - b_{i-1})\\ &+ \lambda_{lm} \sum_j \log p(r_j|r_{j-3}r_{j-2}r_{j-1})\\ &+ \lambda_{hm} \log p_{hm}({\mathbf r}) \end{split}$$ where $\lambda_{tm}$, $\lambda_{ds}$, $\lambda_{lm}$ and $\lambda_{hm}$ are weight parameters of the sub-models and can be learned automatically. Learning and Decoding --------------------- In the model M1, we use relative frequency to estimate the word translation probability distribution $\phi_{tm}({s}_i, {r}_i)$, and no smoothing is performed. $$\phi_{tm}(s, r) = \frac{\mathrm{count}(s,r)}{\sum_{s'} \mathrm{count}(s',r)}$$ A special token NULL is added to each utterance and aligned to each unaligned foreign word. The training process is similar to that in [@ritter2011data]. We use the widely used toolkit Moses [@koehn2007moses] to train the translation model. The scikit-learn toolkit[^2] is used to train the humor classifier, and the prediction probability is acquired through the [predict\_proba]{} API function. In order to estimate the weight parameters in the combined model, we apply the minimum error rate training (MERT) algorithm [@Och:2003:MER:1075096.1075117], which has been broadly used in SMT. The most common optimization objective function is BLEU-4 [@papineni2002bleu], which requires human references. We take the original human response derived from our parallel corpus as the single reference. We use the tool Z-MERT [@zaidan2009z] for estimation. The weight parameter values that lead to the highest BLEU-4 scores on the development set are finally selected. In the decoding process, we use the beam search algorithm to generate the top-100 best response candidates for each input utterance based on M1 and M2. Then we obtain the M3 score of each candidate, rank the candidates according to the combined model and select the best candidate as output.
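The sketch below illustrates, under our reading of the above, how the sub-model scores could be combined for each beam-search candidate and how the final humor-based reranking described in the next subsection fits in. The candidate dictionaries (holding log-domain Moses scores) and names such as `humor_clf` and `featurize` are assumptions made for illustration; only `predict_proba` is an actual scikit-learn call.

```python
# Illustrative sketch of candidate selection (not the authors' code):
# combine the log-domain translation, distortion and language-model scores
# from the n-best list with the humor probability, then rerank the top few
# candidates purely by the humor score (see the following subsection).
import math

def combined_score(cand, w):
    return (w["tm"] * cand["log_tm"]
            + w["ds"] * cand["log_distortion"]
            + w["lm"] * cand["log_lm"]
            + w["hm"] * math.log(max(cand["p_humor"], 1e-12)))

def select_response(candidates, humor_clf, featurize, weights, rerank_k=5):
    # Attach the humor-model probability p_hm(r) to each beam-search candidate.
    for cand in candidates:
        feats = [featurize(cand["response"], cand["utterance"])]
        cand["p_humor"] = humor_clf.predict_proba(feats)[0][1]
    # Rank all candidates by the MERT-weighted log-linear combination.
    ranked = sorted(candidates, key=lambda c: combined_score(c, weights), reverse=True)
    # Final step: keep the top-k and pick the most humorous one among them.
    return max(ranked[:rerank_k], key=lambda c: c["p_humor"])
```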
Final Reranking with the Humor Model ------------------------------------ Note that the above combined model is optimized for the BLEU-4 score, but the BLEU-4 score cannot well reflect the humorous aspect of generated responses. So, in order to improve the humor level of a dialogue, we further select the top five response candidates generated by the above combined model, rerank them according to the score of the humor model (M3), and finally use the top-ranked one as the output. Note that we use only the top five candidates in this step because it is more efficient and effective to rerank a small number of high-quality candidates, while the readability and relevance of candidates with lower ranks cannot be guaranteed. The number five is determined based on the development set. Experiment Setup ================ Experiment Data --------------- We collect the crosstalk data from multiple sources: (a) published books[^3]; (b) websites[^4], where Chinese crosstalk fans collect and collate existing famous crosstalk masterpieces; and (c) records of crosstalk plays. The dataset we collect consists of over $173,000$ pairs of utterances, from $1,551$ famous excerpts of crosstalks. Since long sentences would slow down our training process, we filtered out responses longer than 60 words. In order to improve quality, we also filtered out very short responses that usually consist of a single modal particle. Over $150,000$ utterance pairs remained in our dataset after this process. We divide the pairs of utterances and responses in the dataset into three parts: we randomly select $2000$ pairs as the test set, $4000$ pairs as the development set for weight parameter estimation, and the rest as the training set for the translation model. Since training a language model requires a large-scale dataset, which can hardly be obtained in the domain of crosstalks, we add Chinese microblog messages from Sina Weibo[^5] to enlarge the corpus for language model training. The language styles in Weibo and Chinese crosstalks are quite similar in that the sentences in Weibo messages and crosstalks are usually short and informal. We collect 6 million Weibo messages and comments from Sina Weibo. Not all utterances in Chinese crosstalks are humorous, because there are many utterances serving as go-betweens, so we have to manually build the training data for humor model learning. Because of the lack of a Chinese humor dataset, we randomly collect $6000$ pairs of utterances in Chinese crosstalks, and manually label them into two classes: [*humorous*]{} or [*not humorous*]{}. $348$ pairs are marked as [*humorous*]{}, and we replicate the minority class instances and remove some majority class instances to make the class distribution more balanced. Then we use the labeled data for training the random forest classifier in the humor model. Comparison Methods ------------------ We implement several generation based and retrieval based methods for comparison: - [**Seq2Seq**]{}: Treat the problem as a translation problem with a <span style="font-variant:small-caps;">Seq2Seq</span> model with attention. GRU cells are used in the RNN, with 256 units. - [**Ir-Ur**]{}: Retrieve the response which is most similar to the input utterance from both the development set and the training set.
- [**Ir-Uu**]{}: Retrieve the utterance most similar to the input utterance, and then return the response associated with the retrieved utterance; - [**Ir-Cxt**]{}: Retrieve the response which is most similar to the input utterance and the three previous utterances of the input utterance. Similarity is calculated using word-level cosine similarity. Our proposed method consists of all three sub-models (including the final reranking step) and is named [**Smt-H**]{}. We further compare our method with the basic machine translation method that considers only the two sub-models M1 and M2, named [**Smt**]{}. Note that in our method, the humor model is used in both the combined model and the final reranking process. Evaluation Metrics ------------------ We adopt human evaluation to verify the effectiveness of our system. We also report automatic evaluation results with BLEU [@papineni2002bleu]. However, only one reference response is provided for each given utterance in the dataset, and the humor aspect cannot be well captured by the BLEU metric. So we rely on both automatic and human evaluation results for this special task. We employ two human judges to rate each generated response in three aspects: **Readability**: It reflects grammatical correctness and fluency; **Entertainment**: It reflects the level of humor of the response; **Relevance**: It reflects the semantic relevance between the input utterance and the generated response. It also reflects logical and sentimental consistency. Each judge is asked to assign an integer score in the range of 0 $\sim$ 2 to each generated response with respect to each aspect. The score $0$ means “poor” or “not at all”, $2$ means “good” or “very well”, and $1$ means “partially good” or “acceptable”. For example, in readability, $1$ means that there are some grammar mistakes but a human evaluator can still understand the meaning of the response. To help human raters determine whether the generated response is relevant to the input utterance, we also provide the previous two rounds of dialogue of the input utterance to the raters. Results and Analysis =================== Automatic Evaluation Results ---------------------------- As shown in Table \[res-auto\], the BLEU scores of our <span style="font-variant:small-caps;">Smt-H</span> are higher than those of the baselines on the $2000$-utterance test set, which can be explained by the fact that more global features are accessible to the <span style="font-variant:small-caps;">Smt-H</span> model. *t*-test results on the BLEU scores of the two models show that their difference is significant ($p < 1\times 10^{-5}$).
BLEU-4 BLEU-3 BLEU-2 BLEU-1 ------------------------------------------------------- ----------- ----------- ----------- ----------- <span style="font-variant:small-caps;">Smt-H</span> **16.62** **18.99** **22.41** **29.57** <span style="font-variant:small-caps;">Smt</span> 15.13 17.39 20.62 27.39 <span style="font-variant:small-caps;">Seq2Seq</span> 16.03 17.76 20.63 27.66 <span style="font-variant:small-caps;">Ir-Uu</span> 2.6 3.51 5.52 12.53 <span style="font-variant:small-caps;">Ir-Ur</span> 4.4 5.15 6.83 13.13 <span style="font-variant:small-caps;">Ir-Cxt</span> 3.14 4.17 6.34 13.76 <span style="font-variant:small-caps;">Rnd</span> 0.00 0.00 1.65 9.73 : Automatic Evaluation Result of <span style="font-variant:small-caps;">Smt</span>, <span style="font-variant:small-caps;">Smt-H</span> and <span style="font-variant:small-caps;">Seq2Seq</span>[]{data-label="res-auto"} As expected, the <span style="font-variant:small-caps;">Seq2Seq</span> model gets better scores in our automatic test than the ordinary <span style="font-variant:small-caps;">Smt</span> model, but not better than our <span style="font-variant:small-caps;">Smt-H</span> model. A larger training set might help improve the performance of the deep learning based model. The BLEU scores of all <span style="font-variant:small-caps;">Ir</span> based models are lower than $5\%$. One possible reason is that the crosstalk dataset is not very large, and the utterances in the dataset are very diversified, so it is hard for retrieval based methods to find proper responses from the dataset directly. Generation based methods, in contrast, are more flexible and can generate new responses for input utterances. For the retrieval based methods, IR-UR performs better than IR-UU, which is contrary to our intuition. This phenomenon has been discussed in [@ritter2011data]. Human Evaluation Results ------------------------ We randomly selected $150$ input utterances in the test set and asked two raters to label the responses generated by retrieval based methods and machine translation based methods. The percentage of each rating level is calculated for each method with respect to each aspect, as shown in **Figure \[fig:score\]**. Since retrieval based methods extract existing utterances directly from the dataset, the readability of the retrieved responses is usually very good and thus we do not need to label the readability of these responses. We further compute the relative ratio of the average rating score of each method to the average rating score of the basic [Smt]{} model with respect to each aspect, as shown in **Table \[res-relative\]**, and a ratio score larger than 100% means the corresponding method performs better than the basic [Smt]{} model, while a ratio score lower than 100% means the corresponding method performs worse than the basic [Smt]{} model.
<span style="font-variant:small-caps;">Smt</span> <span style="font-variant:small-caps;">Smt-H</span> <span style="font-variant:small-caps;">Seq2Seq</span> <span style="font-variant:small-caps;">Ir-Uu</span> <span style="font-variant:small-caps;">Ir-Ur</span> <span style="font-variant:small-caps;">Ir-Cxt</span> --------------- --------------------------------------------------- ----------------------------------------------------- ------------------------------------------------------- ----------------------------------------------------- ----------------------------------------------------- ------------------------------------------------------ Readability 100.00% 100.41% 110.74% - - - Entertainment 100.00% 120.59% 69.12% 4.47% 8.94% 13.41% Relevance 100.00% 98.42% 99.74% 28.69% 58.97% 39.84% As can be seen from the human evaluation results, the responses generated by machine translation based methods are much more entertaining and relevant than those generated by retrieval based methods. We also find that [Ir-Ur]{} returns more relevant responses than [Ir-Uu]{}. The results also reveal that the translation model can generate more entertaining but less fluent responses than the <span style="font-variant:small-caps;">Seq2Seq</span> model. This may be because the responses generated by the <span style="font-variant:small-caps;">Seq2Seq</span> model are too ordinary to amuse people. Comparing [Smt]{} with [Smt-H]{}, we can see that [Smt-H]{} receives higher rating scores than [Smt]{} with respect to readability and entertainment. The comparison results demonstrate that the use of the humor model can indeed make the generated responses more entertaining, which is very important for Chinese crosstalks. An auxiliary effect of using the humor model is improved readability of the generated responses. Now we show two examples of input utterances and the different responses generated by [Ir-Uu]{}, <span style="font-variant:small-caps;">Seq2Seq</span>, [Smt]{} and [Smt-H]{} as follows: --------------------------------------------------------------- ------------------------------------- **A**: 躲?我把扁担一横,立托千斤 *(context)* 闸,迎着他的铁棍——咔! /Hide? I bravely used my shoulder pole to meet against his iron bar... Crack! **B**: 铁棍飞 。/His iron bar dropped? **A(input)**: 扁担折了。 /No, my shoulder pole was fractured. **B(<span style="font-variant:small-caps;">Smt</span>)**: 搁在一块儿说个绕嘴的折了拿 什么打? /Put them together and tell me a tongue twister, what else could be used to meet against his weapon? **B(<span style="font-variant:small-caps;">Smt-H</span>)**: 好 — 折啦 ? /What? It was fractured? **B(<span style="font-variant:small-caps;">Seq2Seq</span>)**: 这回该二朝的关系? / (*Broken sentences*) **B(<span style="font-variant:small-caps;">Ir-Uu</span>)**: 都知道。 /We all know that. --------------------------------------------------------------- ------------------------------------- **B***(context)*: 老乡们 / My Fellow villagers --------------------------------------------------------------- ------------------------------- **A(input)**: 猪尾巴 / The pork tail **B(<span style="font-variant:small-caps;">Smt</span>)**: 噢!才生下来的?你? /Were just born? You? Are you kidding me? **B(<span style="font-variant:small-caps;">Smt-H</span>)**: 注意吧 / Pay attention **B(<span style="font-variant:small-caps;">Seq2Seq</span>)**: 注意吧 / Pay attention **B(<span style="font-variant:small-caps;">Ir-Uu</span>)**: 嗬 / Ho In the first example, the response generated by <span style="font-variant:small-caps;">Smt-H</span> is more relevant to the input utterance.
In the second example, the Chinese phrase 猪尾巴/“pork tail” has the same pronunciation as 注意吧/“pay attention” in the response generated by <span style="font-variant:small-caps;">Smt-H</span>. It is laughable since the supporting role (B) distorts the meaning of A’s utterance with a homophonic pun, while the responses generated by <span style="font-variant:small-caps;">Smt</span> and <span style="font-variant:small-caps;">Ir-Uu</span> are totally irrelevant. Discussion ---------- Our method works well with short inputs. It can generate appropriate responses which act as go-betweens in the narration. For example, **A**: 你爸爸穿衣裳也讲究。/ Your father is dainty about his dress. **B**(<span style="font-variant:small-caps;">Smt-H</span>): 怎么讲究?/ Dainty? How ? Some responses generated by our method are entertaining. For example, **A**: 龙生龙, 凤生凤, 老鼠的儿子会打洞/Dragon begets dragon, phoenix begets phoenix, and a mouse’s son can only dig holes. (*You are just like a mouse.*) **B**(<span style="font-variant:small-caps;">Smt-H</span>): 你不是后继无鼠了吗这代, 您这套我都会了。/ You don’t even have a mouse-like successor! I have figured out your tricks. With the use of the humor model in both the combined model and the final reranking process, our method can generate better responses. For example, **A**: 比如说我是天上的一颗星星。/ For example, I am a star in the heaven. **B**(<span style="font-variant:small-caps;">Smt-H</span>): 噢这是不是。 / Oh, is it this one? **B**(<span style="font-variant:small-caps;">Smt</span>): 是噢是你。 / Well, it’s you. **B**(<span style="font-variant:small-caps;">Seq2Seq</span>): 可不对。 / That’s right. However, there are still several shortcomings of our method: 1\) Some generated responses are not fluent and their readability is not good. Some responses are broken sentences. For example, **A**: 好,新春进步!/Well, hope you make progress in the new spring. **B**(<span style="font-variant:small-caps;">Smt-H</span>): 不春进步了。/Not spring progress. The reason may be that the current crosstalk corpus is not adequate for training a high-quality language model, but unfortunately it is hard to obtain a large crosstalk corpus because fewer and fewer people still work on this performing art and create new crosstalks. 2\) In some cases, our method will only give the input words back without translation or rewriting (e.g. 八匹马呀/Ah, there are eight horses). This may be caused by the data sparsity problem in the dataset. If the words or expressions do not appear, or seldom appear, in the training corpus, our method cannot find any “translations” for them and can only return them directly. Related Work ============ The most closely related work is dialogue generation. Previous work in this field relies on rule-based methods, from learning generation rules from a set of authored labels or rules [@Oh:2000:SLG:1117562.1117568; @banchs2012iris] to building statistical models based on templates or heuristic rules [@levin2000stochastic; @pieraccini2009we]. After the explosive growth of social networks, the large amount of conversation data has enabled data-driven approaches to dialogue generation. Research on statistical dialogue systems falls into two categories: 1) information retrieval (IR) based methods [@ji2014information], and 2) statistical machine translation (SMT) based methods [@ritter2011data]. IR based methods aim to pick suitable responses by ranking candidate responses.
But these methods have an obvious drawback: the responses are selected from a fixed response set, so it is not possible to produce new responses for special inputs. SMT based methods treat response generation as an SMT problem on post-response parallel data. These methods are purely data-driven and can generate new responses. More recently, neural network based methods have been applied in this field. In particular, the <span style="font-variant:small-caps;">Seq2Seq</span> model and reinforcement learning have been used to improve the quality of generated responses [@li-EtAl:2016:EMNLP20162]. Adversarial learning has also been applied in this field in recent years [@li-EtAl:2017:EMNLP20175]. [@serban2017hierarchical] introduced stochastic latent variables into RNN models for the response generation problem. Neural network based methods are promising for dialogue generation. However, as mentioned earlier, training a neural network model requires a large corpus. Sometimes it is hard to obtain a large corpus in a specific domain, which limits their performance. Another kind of related work is computational humor. Humor recognition or computation in natural language is still a challenging task. Although understanding universal humor characteristics is almost impossible, there have been many attempts to capture the latent structure behind humor. Taylor used ontological semantics to detect humor. Yang identified several semantic structures behind humor and employed a computational approach to recognizing humor. Other studies also investigate humor with spoken or multimodal signals [@purandare2006humor]. But none of these works provides a systematic explanation of humor, not to mention recognizing humor in Chinese crosstalks. Moreover, there are several studies attempting to generate puns and jokes. For example, the JAPE system was developed to automatically generate punning riddles [@binsted1994implemented; @binsted1997computational], and it relies on a template-based NLG system, combining fixed text with slots. Following the seminal work of Binsted and Ritchie, the HAHAcronym system was developed to produce humorous acronyms [@stock2005act], and the subsequent system of [@binsted2003pun] focuses on the generation of referential jokes. More recently, an interesting unsupervised alternative to this earlier work was offered [@petrovic2013unsupervised], which does not require labeled examples or hard-coded rules. It starts from a template involving three slots and then finds funny triples. However, the task of entertaining dialogue generation has not been investigated. Conclusions and Future Work =========================== In this paper, we investigate the possibility of automatic generation of entertaining dialogues in Chinese crosstalks. We proposed a humor-enhanced translation model to generate the replying utterance of the supporting role, given the utterance of the leading comedian in Chinese crosstalks. Evaluation results on a real Chinese crosstalk dataset verify the efficacy of our proposed model, especially the usefulness of the humor model. In future work, we will try to enlarge the dataset by exploiting dialogue data in other similar domains, aiming at further improving the performance. We will also investigate generating the utterance of the leading role in the crosstalks, given the context utterances in several previous turns of dialogue. [^1]: http://pypi.python.org/pypi/pypinyin [^2]: http://scikit-learn.org/stable/index.html [^3]: (1) Liu Yingnan.
[*A Complete Collection of China Traditional Cross Talks 5 Vols.*]{}, Culture and Art Publishing House, 2010. (2) Wang Wenzhang. [*Famous Crosstalk Actor’s Masterpiece Series*]{}, Culture and Art Publishing House, 2004. et al. [^4]: \(1) http://www.xiangsheng.org; (2) http://www.tquyi.com; et al. [^5]: http://weibo.com
Prevalent SLC26A4 mutations in patients with enlarged vestibular aqueduct and/or Mondini dysplasia: a unique spectrum of mutations in Taiwan, including a frequent founder mutation. The purpose of the study is to elucidate the mutation spectrum of SLC26A4 among patients with enlarged vestibular aqueduct and/or Mondini dysplasia in Taiwan and to explore the origin of the most common mutation, IVS7-2A>G. The correlation between the genotypes and the phenotypes is also investigated, with special emphasis placed on comparison between the genotypes and hearing levels. A 3-year prospective clinical genetic study at a tertiary care university hospital. Mutations on SLC26A4 were screened in 38 families that fulfilled the criteria of enrollment, and single nucleotide polymorphisms (SNPs) in the vicinity of IVS7-2A>G were typed. The presence of goiter, radiologic findings, and audiologic results of the probands were then compared according to the genotypes. A total of eight mutations were detected in 33 families, and IVS7-2A>G accounted for 84% (48/57) of the mutated alleles. SNP analysis confirmed the founder effect of IVS7-2A>G. Meanwhile, no obvious correlation was observed between SLC26A4 genotypes and phenotypes. The present study disclosed the unique SLC26A4 mutation spectrum in Taiwan, confirmed that IVS7-2A>G arose from a common ancestor, and demonstrated the lack of correlation between genotypes and phenotypes. High prevalence of certain SLC26A4 mutations in East Asians, as revealed here and previously, might largely facilitate mutation screening and genetic counseling in these areas.
Defender denies Reds talk Cesar Azpilicueta says there has been no contact from Manchester United boss Jose Mourinho and insists he is happy at Chelsea. Reports are rife that Mourinho is keen to return to Stamford Bridge and swoop for the Spain international, who has been a revelation since joining the Blues, but there has been no contact as yet. Azpilicueta is currently at Euro 2016 with the rest of the Spain squad, but was quick to deny United had been in touch when quizzed by a local radio station. When asked if Mourinho had been in touch, he replied: “No. [You can] Believe me or not.” The defender then revealed he is aware of interest in him with Athletic Bilbao also known to be keen, but reaffirmed his commitment to Chelsea ahead of the new season. “It’s always a compliment that you are wanted, I’ve heard highly about Valverde [Bilbao’s manager], but I’m focused on the national team and Chelsea, I’ve been there for four years, I am very happy and I feel highly valued,” he added. Mourinho has also been linked with Chelsea midfielder Nemanja Matic, and he looks sure to attempt some kind of raid on Stamford Bridge over the summer.
[/caption] Mars is not the dead world we once thought it was, and these images portray that fact. Stuart Atkinson (not a relative, but a good pal) is part of the UnmannedSpaceflight.com crew, the folks who have created fantastic visual treats from raw images sent back from our space exploring robots. Stu also has his own website, Cumbrian Sky. Today, Stu posted some pictures on his website, along with his story of how he came to create an utterly fantastic image from a rather ordinary-looking picture from the HiRISE camera on the Mars Reconnaissance Orbiter. After seeing Stu’s handiwork today, I asked him if he wouldn’t mind sharing his story on Universe Today of how he came to make an amazing discovery on Mars. (Click on the image above for a larger version.) Stu said he found the image above on the “dangerously addictive” Mars Global Data website. The image, of the Aram Chaos region, didn’t look very interesting at first. But then he zoomed in to get a better look at the “slumpy” feature in the middle of the image, and then zoomed in some more. “It looked like a section of the valley wall had come loose and slid down to the lower ground below,” Stu said as he raised a Spock-like eyebrow… There, he discovered the track of a huge boulder that had “fallen from the valley wall and tumbled down, bouncing and rolling and boinging along for a long, long way.” Adding color to the image really highlighted the tracks left by the bouncing boulder, and a few other tracks showed up as well, creating an absolutely awesome image and discovery! “You know what I love about these images?” Stu wrote on his blog. “They show movement, they show that things are moving on Mars right now. It’s not the dead, lifeless, corpse of a world I grew up with; it’s a world – we now know – where dust devils whirl and twirl across the plains… where clouds drift through the pale pink sky… and where huge stones fall from high cliff face, fall to the ground below, then bounce and roll and crash over it… unseen by anyone.” “For now.” Great detective work, Stu! I propose that this region should be named after Stuart Atkinson (at least the boulders, if nothing else!) Also, check out Stu’s pictorial history of the International Space Station, starting from when it was a wee infant back in 1998 to the great images we’ve seen of the ISS this past week.
Are You Setting Yourself Up for Burnout? Physician burnout appears to be on the rise. Here’s why, along with some ideas on how to get better control. While physician burnout has always been a problem, today’s doctors have more on their plates than doctors of previous generations, and they seem to be more affected by burnout than professionals in other fields. A recent survey compared burnout and satisfaction with work/home life balance among physicians and those employed in other professions.1 The study included 7,288 physicians who completed surveys. When assessed using the Maslach Burnout Inventory, researchers found that 45.8 percent of physicians reported at least one symptom of burnout. Compared with a probability-based sample of 3,442 adults working in other professions, physicians were more likely to have symptoms of burnout (37.9 percent compared with 27.8 percent) and to be dissatisfied with their work/life balance (40.2 percent compared with 23.2 percent). Why Do Physicians Burn Out? “There are concerns that the ever-increasing pace of practice might be contributing to burnout because physicians are being asked to see more and more patients in less and less time,” says Colin P. West, MD, PhD, FACP, from the Divisions of General Internal Medicine and Biomedical Statistics and Informatics, Departments of Internal Medicine and Health Sciences Research, at the Mayo Clinic, in Rochester, Minn. “That sort of treadmill effect may play a role. There is also concern about what will happen if the practice of medicine continues to go in those directions while social structures are changing. For example, it is much more common to have dual-career households. Fifty years ago, stereotypically, the physician might work 60 hours a week while his wife was the homemaker. There was often a more starkly defined division of responsibilities. These days, spouses and partners don’t want that, and physicians themselves don’t want that. Today, there is the shared family model that I think is a societal change as well. That makes work/life balance that much more challenging.” Additionally, there is much less division between work life and home life. With computers and smartphones, physicians may feel like they are always on call. It can be difficult to prevent work issues from invading home time. “One of my colleagues says that physicians can never disconnect from the grid,” Dr. West says. “We are connected with our smartphones, and we can remotely access into our patients’ data. There has always been some of that—physicians being on their pagers, etc.—but I think the volume may have increased.” The Internet may also play a role. While there are benefits to seeing better-educated patients, some patients may present to the office with specific expectations based on their Internet research. “Some patients may view their relationship with the physician not as a partnership but more as the patient hiring the physician as a consultant and wanting to tell the physician what should be done instead of coming to a mutually agreed-upon decision that involves both of them working together,” Dr. West notes. Helen Meldrum, EdD, an associate professor of psychology in the Program of Health Sciences and Industry at Bentley University in Waltham, Mass., agrees. 
“It’s only in recent times when a physician gives the patient a treatment option, and the patient says, ‘that’s not what I Googled.’ Physicians used to be able to count on a certain amount of authority, but their authority is at an all-time low because patients feel like they are well-educated by the Internet,” she says. The business side of medicine may also be partly to blame. “When doctors go to medical school, they are looking forward to the chance to have relationships with patients,” Dr. West says. “Anything that gets in the way of those relationships can contribute to dissatisfaction and burnout. We have focused our work in the past decade on trying to document the problem, and it has been a really interesting transition in recent years. When we first started our group’s work, we would receive comments that people didn’t believe it was a significant issue. Now, we are getting a lot more acceptance that this is a pervasive problem.” A recent study found that surgeons in private practice were more likely to experience burnout than their colleagues in academic practice.2 The study compared distress parameters and career satisfaction from survey results of surgeons in 14 specialties. It found that academic surgeons were less likely to screen positive for depression or to have suicide ideation. They were also more likely to experience career satisfaction and to recommend a medical career to their children. For academic surgeons, the most significant positive associations with burnout were being a trauma surgeon, number of nights on call and hours worked. For private practice surgeons, the most significant associations with burnout were being a urologic surgeon, having 31 percent to 50 percent time for nonclinical activities, incentive-based pay, nights on call and hours worked. Additionally, physicians may be putting unnecessary pressure on themselves because of their personality types. “There have been a couple of articles written over the years on the compulsive personality type and the contribution of perfectionism and excessive sense of responsibility of physicians,” Dr. West says. “The people who are selected to go to medical school and self-select to go into medical school are people who tend to have perfectionist tendencies. One definition of burnout that I heard is the distance between what a person is able to do and what he feels he should be doing. Because the bar is very high for doctors, in terms of their own bar and society’s bar, it makes it difficult for doctors to live up to their own high expectations as well as the external expectations. As the number of patients increases and the amount of things the doctor feels like she needs to be doing increases, the possibility of living up to her own high expectations becomes smaller, and that creates conditions for burnout as well.” Dr. Meldrum adds that debt may be a contributing factor because many physicians are taking on consulting jobs to pay off their medical school debt. “Many physicians are taking on extra consulting jobs just because they are so saddled with debt,” she says. “If you work inhuman hours, where are your friends and family in the picture? They are unhappier with you than ever. When doctors are home, they are often cranky and overstressed and tired.” Dr. West and his colleagues conducted a study to evaluate the relationship between well-being and demographics, educational debt and medical knowledge.3 The study included 16,187 residents. 
In this study, sub-optimal quality-of-life and symptoms of burnout were common, and higher debt (more than $200,000) was associated with symptoms of burnout. “Then, there are the physicians who are older in a supervising role and their direct reports have staff to manage,” Dr. Meldrum says. “The stress of not having a support staff that has good interpersonal skills means that more and more things get bumped upstairs. Additionally, patients are more demanding, so physicians are hearing more complaints than they did in the past.” Prevention Strategies Fortunately, there are many things physicians can do to prevent burnout or to turn things around once they start feeling burned out. Dr. Meldrum conducted a study on physicians’ strategies for avoiding burnout and found that some helpful techniques for avoiding burnout included setting limits, sharing issues with friends and family, physical exercise, cultivating relaxation and humor.4 Matthew J. Goodman, MD, an internist and co-director of the University of Virginia’s Mindfulness-Based Stress Reduction Program in Charlottesville, Va., recommends the following: • Foster healthy family relationships and friendships. • Keep hobbies alive. • Have a religious or spiritual affiliation. • Look to find meaning in your work. • Find a mentor. Dr. Goodman notes that, because there is growing awareness about physician burnout, there are groups of physicians who are looking at ways to get together. “Physicians rarely talk to each other except about medicine and patients, but it can be helpful to become part of a group of like-minded physicians who can serve as a support group or share experiences.” Dr. Goodman and his colleagues have done some work with teaching mindfulness to physicians, and data is becoming available on mindfulness classes to help with physician self-awareness and developing the ability to maintain a sense of calm or to be in difficult circumstances and not get carried away by them.5 Dr. Goodman’s study included 93 health-care providers, including physicians from multiple specialties, nurses, psychologists and social workers who practice in both university and community settings. The health-care providers attended a continuing-education course based on mindfulness-based stress reduction. The course met 2.5 hours a week for eight weeks plus a seven-hour retreat. The classes included training in four types of formal mindfulness practices, including the body scan, mindful movement, walking meditation and sitting meditation. Providers’ Maslach Burnout Inventory scores improved significantly from before to after the course for both physicians and other health-care providers for the Emotional Exhaustion, Depersonalization and Personal Accomplishment scales. Mental well-being also improved significantly. He notes that, while mid-career physicians are at risk, burnout can happen at any age or career stage. “Mid-career physicians have been practicing medicine long enough that some of the novelty has worn off,” Dr. Goodman says. “Some of their idealism is being challenged by what they are expected to do. This is a group that is really is need of self-renewal. In our classes, we have had physicians from all stages. There is a certain amount of burnout even among residents.” Dr. Meldrum recommends keeping close tabs on your office environment and quickly addressing any conflicts that arise. “A lot of times, I get called in because the collegial relationships have gotten so petty that it’s causing burnout in the office,” she says. 
“People are negatively complaining behind other people’s backs about certain staff being treated better than other staff members. If you notice that you are getting up later and later and are not looking forward to going to work, you know that there is growing resistance. When people ‘clock watch’ in pairs and packs in the practice setting, that’s another sign of toxicity. If the office has become a toxic environment, physicians really need to invest in some additional training for everyone at all levels.” Other important ways to avoid burnout are to maintain a healthy work/home life balance and to spend as much work time as possible treating patients, rather than performing administrative tasks. “We know that if you take away autonomy and control from physicians, they do worse,” Dr. West says. “So, the idea that physicians should be told when to see patients and how much time they get with each patient doesn’t work well. We know that physicians who work themselves into the ground don’t have anything left for their patients, so workload is part of this. We know that things that distract physicians from the interpersonal relationships, such as excessive paperwork and excessive administrative burdens, are not good for physician well-being. We know that physicians who work to the point that it detracts from their home lives develop problems. I don’t know that anyone has the magic solution to work/personal life balance, but physicians need to be aware of where they are on their own stress curves.” Additionally, physicians need to remember that by taking care of themselves, they are actually caring for their patients. REVIEW
Q: Use Ctrl Alt Num Pad to position Windows on Screen not desktop So far I could position any window using the Ctrl + Alt + [num-pad-key] on the screen. Even with multiple presses I could change the size in the corner/side. Now with 17.10 on Xorg the Ctrl + Alt + [num-pad-key] positions the window on the whole desktop rather than a single screen. That is very useless to me with a 3 screen setup. Any idea how to make these shortcuts work per screen and that they scale the window? A: The same question is discussed here: How do I restore the Unity 7 Ctrl-Alt-Numpad combination behaviour on Gnome 3? Basically, what I have tried from this thread (and that worked) is this: unset the problematical keybindings from gnome: gsettings set org.gnome.desktop.wm.keybindings move-to-side-n "['disabled']" gsettings set org.gnome.desktop.wm.keybindings move-to-corner-ne "['disabled']" gsettings set org.gnome.desktop.wm.keybindings move-to-side-e "['disabled']" gsettings set org.gnome.desktop.wm.keybindings move-to-corner-se "['disabled']" gsettings set org.gnome.desktop.wm.keybindings move-to-side-s "['disabled']" gsettings set org.gnome.desktop.wm.keybindings move-to-corner-sw "['disabled']" gsettings set org.gnome.desktop.wm.keybindings move-to-side-w "['disabled']" gsettings set org.gnome.desktop.wm.keybindings move-to-corner-nw "['disabled']" with sw, nw, ne, se for corners, and n, e, s, w for sides. Then I have tried installing the gnome extension called Put Windows. Using this extension options (refresh page if you don't see the option button next to the on/off button), I was able to set the correct keybindings I wanted. If some keybindings still do not work (that was my case for the move-windows-monitor-left/right commands), you can try to install ccsm : sudo apt install compizconfig-settings-manager And set the remaining keybindings under the "Window Management" section, in "Grid".
Q: how todays google doogle World's Fair is implemented? I wanna know is it using the power of HTML5 and jquery any sample example can i get for this ? A: When examining the code, you will actually see that the doodle consists of multiple images: http://www.google.com/logos/2011/worldsfair11-hp-1.gif http://www.google.com/logos/2011/worldsfair11-hp-2.gif http://www.google.com/logos/2011/worldsfair11-hp-3.gif http://www.google.com/logos/2011/worldsfair11-hp-4.gif http://www.google.com/logos/2011/worldsfair11-hr.jpg - the actual static Doogle http://www.google.com/logos/2011/worldsfair11-hp.png The beautified JavaScript is here: http://pastebin.com/r8s9H2jA It seems to use JavaScript to create the loupe and uses a combination of CSS / JavaScript to position the animating elements at the right position. So in short: HTML5: It's JavaScript combined with CSS. This, by itself, is not limited to the HTML5 spec, so no. jQuery: no.
package sample.httpjs; import io.undertow.server.HttpServerExchange; import io.undertow.util.Headers; import org.nustaq.kontraktor.*; import org.nustaq.kontraktor.annotations.Local; import org.nustaq.kontraktor.impl.SimpleScheduler; import org.nustaq.kontraktor.util.Log; import java.util.Arrays; import java.util.Date; import java.util.List; /** * Created by ruedi on 29/05/15. * * Example Single Page Server with actors talking to JavaScript. * * This class provides the stateless server API exposed to JS. By calling login, * the client can authenticate and instantiate a dedicated SessionActor instance * which then expose per-client api and server side state. (see ../../web/index.html) * */ public class MyHttpApp extends Actor<MyHttpApp> { public static final int CLIENT_QSIZE = 1000; // == preallocate a queue of CLIENT_QSIZE/10 for each session private Scheduler clientThreads[]; private int sessionCount = 0; public IPromise<String> getServerTime() { return new Promise<>(new Date().toString()); } @Local public void init() { // you won't need many threads. If there is heavy computing or you do blocking operations (hint: don't) // inside a session actor, increase this. It will still work with hundreds of threads (but then you get jee ;) ) clientThreads = new Scheduler[]{ new SimpleScheduler(CLIENT_QSIZE,true), // only two session processor threads should be sufficient for small apps. new SimpleScheduler(CLIENT_QSIZE,true), }; Thread.currentThread().setName("MyHttpApp Dispatcher"); } public IPromise<MyHttpAppSession> login( String user, String pwd ) { Promise result = new Promise<>(); if ( "admin".equals(user) ) { // deny access for admin's result.reject("Access denied"); } else { // create new session and assign it a random scheduler (~thread). Note that with async nonblocking style MyHttpAppSession sess = AsActor(MyHttpAppSession.class,clientThreads[((int) (Math.random() * clientThreads.length))]); sess.init( self(), Arrays.asList("procrastinize", "drink coffee", "code", "play the piano", "ignore *") ); result.resolve(sess); sessionCount++; } return result; } public IPromise<Integer> getNumSessions() { return resolve(sessionCount); } @Local public void clientClosed(MyHttpAppSession session) { sessionCount--; System.out.println("client closed "+session); } /** * inlink e.g. from a mail sent .. * @param exchange */ public void handleDirectRequest(HttpServerExchange exchange) { Log.Info(this,"direct link path:"+exchange.getRelativePath()); exchange.setResponseCode(200); exchange.getResponseHeaders().put(Headers.CONTENT_TYPE, "text/html; charset=utf-8"); exchange.getResponseSender().send( "Hello there, "+exchange.getRequestPath() ); } }
Q: How to monitor a spring-boot application via JMX? I'm trying to set up a JMX monitoring for a comand line app build with spring-boot. According to https://github.com/spring-projects/spring-boot/tree/master/spring-boot-actuator I just have to add the dependency: <dependency> <groupId>org.springframework.boot</groupId> <artifactId>spring-boot-starter-actuator</artifactId> </dependency> Now I start my app, open VisualVM and I already see my application PID. But how can I now access the metrics like /health etc that are mentioned on the actuator page? As this is a comand line app, I don't have any HTTP path that I can execute. How can I see those metrics via JXM? A: If you want to access JMX beans with JVisualVM, you need to install the VisualVM-MBeans plugin (go to Tools -> Plugins -> Available Plugins). You can also use JConsole, which can access JMX beans by default.
1. Technical Field The present invention relates to a biomagnetic measurement apparatus, including a SQUID (Superconducting Quantum Interference Device) fluxmeter that measures weak magnetic signals generated from the heart, the brain, etc. of adults, children, fetuses, or the like. In particular, the present invention relates to the biomagnetic measurement apparatus having means for detecting and avoiding a condition of saturation, when an integrator of a fluxmeter operation circuit is saturated with noise and thereby magnetic measurements cannot be conducted. 2. Background Art The SQUID fluxmeter has experienced problems such as magnetic field measurement errors, which are caused by changes of output signals of an integrator when phase changes or amplitude changes of amplifier outputs occur due to variations of temperature, power source, or other factors. In order to solve this problem, JP Patent Publication (Kokai) No. 9-257895 A (1997) discloses, in a fluxmeter using SQUID, a magnetometer comprising a comparator that compares an output from a SQUID with preset upper and lower limits. This magnetometer notifies the SQUID or a drive circuit of abnormalities when the output is beyond the upper or lower limits. In addition to phase changes and amplitude changes of amplifier outputs, which are regarded as problems according to JP Patent Publication (Kokai) No. 9-257895 A (1997), when the SQUID fluxmeter detects further noise greater than a biomagnetic signal, an integrator of a fluxmeter operation circuit becomes saturated and a problem arises wherein a magnetic field lock of a magnetic sensor is released. When the magnetic field lock is released, the output of the fluxmeter does not revert back, even after the causative noise for releasing the lock disappears, and it is impossible to continue the measurement. Therefore, it is necessary to discover that the magnetic field lock is released as early as possible, to notify an operator of the above saturated state (detection of the release of the magnetic field lock), to take required measures to resolve the saturated state, for example, manually or automatically, and to conduct operations for the magnetic field lock again, thereby creating a state for enabling magnetic measurements.
By now, just about everybody in Central Florida who loves pizza, and just loves good food in general, has heard about or has been to Pizza Bruno. If you have not been yet, or are in the minority of people who don't know about one of Orlando's best pizza places, then this article is for you.

Pizza Bruno opened as a Neapolitan-style pizzeria, a hidden gem off the beaten path where tradition meets creativity. Pizza Bruno isn't just a pizza place; it's a passion project with Italian roots and family traditions from chef/owner Bruno Zacchini, who developed a passion and philosophy for great food. Although Pizza Bruno is wildly popular, it has, for the most part, remained relatively underground in the Orlando area, but that is part of the craze and allure of such an establishment. Oftentimes places like this become the stuff of legend and folklore, and when you finally get to try them, you are let down. That is not the case here. Chef Bruno and his team are doing their best to keep the food and experience magical.

The Neapolitan-style pizzas are made with a simple dough, fermented at cold temperatures for up to 48 hours, topped with the best ingredients, and cooked in 90 seconds at temperatures of up to 1000 degrees. The base dough and sauce are both vegan, and they also offer Daiya vegan mozzarella as well as a gluten-free crust for a $2.00 upcharge. Just as Italian food has always emphasized simplicity in flavors and ingredient-driven dishes, Pizza Bruno treats its pizzas the same way. From the delicious California tomatoes and sea salt in the sauce to the locally sourced Italian sausage, they pride themselves on letting the ingredients shine and the food speak for itself. The pies here are baked in a wood-fired oven fed with kiln-dried red oak, which helps the food take on a beautiful leopard-spotted char and that unique wood-fired flavor.

They do serve more than pizza at Pizza Bruno, and the garlic knots have become legendary...if you can get them, as they are made in limited batches. Other items include octopus, wood-fired cauliflower, charcuterie and cheese boards, meatballs, and salads. Beer, wine, and amaro cocktails are served up at Pizza Bruno. They have a $20 corkage fee per 750 ml of wine or beer. This is my favorite local craft brewery, Crooked Can, with Cloud Chaser.

The Heirloom Caprese, with local heirloom tomatoes, fresh mozzarella, basil, and balsamic glaze, is a great way to start your visit, and it's big enough to share. The Wood-Fired Cauliflower is served in a hazelnut romesco sauce and topped with golden raisin agrodolce and Pecorino Romano, and it is simply amazing. Quite simply one of the best octopus dishes I have ever had: the octopus at Pizza Bruno is wine-braised, wood-fire roasted, and served with a seasonal vegetable accompaniment. This Blanca Pizza, with mozzarella, lemon ricotta, garlic, rosemary, arugula, and Pecorino Romano, is just fantastic and full of great flavors.

Do not miss out on any of the cannoli served up at Pizza Bruno; they are all next-level delicious. This is a Nutella Cannoli with Nutella, chocolate chips, and sliced almonds. This is a Classic Cannoli with a classic cannoli filling, chocolate chips, and powdered sugar. I saved the best for last with this Caramel Delight Cannoli, which is out-of-this-world delicious, with coconut mascarpone, a chocolate-covered cannoli shell, toasted coconut, and caramel sauce...it's pretty much a cannoli from heaven above.
Chef Bruno and his staff strive to push creativity and raise the bar for the food they serve up here, to bring Orlando something different and unique and keep up a cult-like following. This place is just plain good, and you will not regret anything on the menu. Pizza Bruno does not have a phone line, so they do not deliver themselves, nor can you call ahead and place a takeout order. They do, however, let you order to go with in-house takeout and online ordering, and they deliver via Uber Eats only. And yes, I went later in the evening on a Saturday night, and they were out of the legendary garlic knots...It happens, so I'll Be Back...Soon!
Student-athletes are more comfortable approaching informal sources of help rather than professional mental health practitioners. Informal sources such as parents, close friends, and siblings are their primary preferred helpers. The least likely to be approached for help are professional help-givers like faculty, religious members, and counselors. The barriers to help-seeking for student-athletes are more internal (e.g., win-at-all- cost philosophy, time management, social stigma, debasing potential concerns, lack of transportation to the counseling venue) than external (e.g., barriers created by the athletic department and the University). PROBLEM STATEMENT This study explored the help-seeking behavior of male college basketball players, the context in which it takes place, the process involved in seeking help, the specific barriers to help-seeking and the possible intervention that would facilitate help-seeking. Specifically, this study answered the following questions: What is the context in which help-seeking behavior occurs among male college basketball players? (Concept of help-seeking; Typical problems; Frequency of seeking help; Preferred helpers and their qualities; and Venue for help-seeking) What is the process involved in seeking help? What are the barriers to seeking help? (Internal and External barriers) Based on the responses, what intervention can be introduced to facilitate help-seeking behavior among student-athletes? What is the result of the pilot run of the intervention to promote help-seeking behavior? (Barriers to help-seeking before and after; Feedback of participants) THEORY This study looked into the help-seeking behavior of male basketball players from different colleges and universities in Metro Manila. Specifically, it looked into the context, process, barriers, and interventions in help-seeking behavior. It was guided by the help- seeking model originally proposed by Srebnik, Cauce, and Baydar (1996). For the purpose of this study, modifications were introduced to include context, barriers, and interventions. Help-Seeking Behavior Cauce et al. (2002) defined help-seeking as a process that “is most likely to occur when a mental health problem is recognized as undesirable and when it is deemed not apt to go way on its own.” It is a three-stage interchangeable process, which involves problem recognition, the decision to seek help, and the selection of the help provider, since each step in this model can be influenced by culture and context. The help-seeking pathways model of Srebnik, Cauce, and Baydar (1996) was inspired by the earlier work of Anderson and Newman (1973), Goldsmith, Jackson, and Hough (1988), and Pescosolido (1992). Cauce et al. (2002) believed that the key to understanding the needs of a client should start at the onset of the help-seeking process. Context Defined as the background stimuli that accompany some kind of foreground event. In this study, it pertains to the concept of help-seeking among the student-athletes; their problems, which prompt them to seek help; preferred helpers and qualities; frequency of seeking help; and venue for seeking help. Process Process is operationally defined in this study as the procedure involved in seeking help. The three-stage interchangeable process proposed by Cauce et al. (2002) was used in this study. Barriers in Help-Seeking Behavior The working definition and description of barriers were adapted from Mansfield et al. (2005) for this study. 
The authors described barriers as “the variables, which are identified as obstacles to seeking help for physical or mental health problems.” In this study, barriers were classified into two categories: internal and external. Intervention Describes the proposed program that will facilitate help-seeking behavior for either the student-athlete or preferred helper based on the study’s findings. RESULTS Concept of Help Seeking Behavior Definition: “Seeking help for a problem that cannot be individually solved.” (N = 58; 53%) 3 of the 5 factors received mean ratings of 2 plus which means that they are fairly important reasons not to seek help. Factor 4: Privacy, with a mean score of 2.115, was seen as the primary barrier, Factor 2: Minimizing Problem and Resignation (2.086), and Factor 1: Need for Control and Self-Reliance (2.012). Suggestions to Encourage Student-Athletes to Seek Help Encourage them not to be too proud, shy, or scared in approaching others (33%/16%) Give advice and encouragement (20%/11%) Have a program on communicating/counseling athletes (0%/21%). DISCUSSION Process: Help-Seeking Behavior Cauce, et al. (2002) defined help-seeking as a process that “is most likely to occur when a mental health problem is recognized as undesirable and when it is deemed not apt to go way on its own.” It is a three-stage interchangeable process, which involves problem recognition (PR), the decision to seek help (DTSH), and the selection of the help provider (SS) as each step in this model can be influenced by culture and context. The authors developed items based on literature in the areas of gender-role strain and gender-role conflict. Based on these findings, an inventory of potential barriers to help-seeking was constructed to identify the “reason a person might choose not to seek help for a problem.” UNIQUE CONTRIBUTION Working Definition Concept of help-seeking; Typical problems; Frequency of seeking help; Preferred helpers and their qualities; and Venue for help-seeking. Program Manual Program was designed to enhance help-seeking behavior and practices among collegiate basketball student-athletes who participate in the UAAP and NCAA tournaments. Program focuses on reducing deterrent help-seeking behavior through the use of psychoeducation and role induction (anticipatory socialization). Reduce inhibitions by providing the student-athletes with knowledge of the subject, communication skills, and time management strategies.
Project Summary/Abstract

Improving Patient-Centered Communication in Breast Cancer: An RCT of a Shared Decision Engagement System (ShaDES)

The diagnosis of breast cancer triggers a cascade of decisions as patients consider multiple treatment modalities navigated by different specialists. Precise evaluative treatment algorithms have enabled better individualized treatment recommendations, yet sifting through the complexity of the test information and treatment options can often be challenging to patients and can cause anxiety. Thus, the advances of precision medicine cannot be realized without parallel advances in patient-centered communication (PCC). This rapidly evolving decision context has fueled a pressing need for more patient-centered communication to address the full breadth of issues, both cognitive and emotional, faced by patients in making breast cancer treatment decisions. There is a critical need for tools that can engage the patient both emotionally and cognitively and be integrated into the breast oncology care clinical workflow. This project is a multi-level, factorial study that crosses a patient-level RCT of 700 newly diagnosed breast cancer patients within 25 breast surgical oncology practices to evaluate a shared decision engagement system (ShaDES) to support PCC. The system links an emotional support-enhanced version of the research group's previously developed iCanDecide patient-facing decision tool with a clinic-level trial of a Clinician Dashboard to help clinicians address remaining cognitive and emotional needs in their patients. In collaboration with the Alliance NCORP Research Base and its Statistics and Data Core, the trial will: 1) evaluate the impact of the emotional support enhancements to iCanDecide on primary and secondary outcomes measuring patient appraisal of PCC, 2) evaluate the impact of the Clinician Dashboard on patient appraisal of PCC, 3) examine potential mediators of the patient and clinic interventions, and 4) conduct a process evaluation of the two intervention components to inform revision and future widespread implementation of ShaDES. The results will lay the groundwork for broad implementation of a shared decision engagement system to improve patient-centered communication in breast cancer.
During transportation, etc. of frozen or refrigerated foods, if these foods accidentally meet a temperature of the predetermined temperature or over, these frozen or refrigerated foods could inadvertently be deteriorated or decomposed. Therefore, strict temperature control management should be carried out to preserve the frozen or refrigerated foods. In the field of medical care, temperature control in preserving particular kind of medicinal drugs, blood, specimen, etc. play an important role. These products could deteriorate rapidly when the temperature rises, and such products may not be used if things go wrong. For controlling such temperature, heat-sensitive indicators that irreversibly indicate a signal in a case where a temperature rises to or over a predetermined temperature, have been used so far. Checking these heat-sensitive indicators allows us to know whether or not the storage temperature during transportation exceeded the upper limit of a temperature control range. Such heat-sensitive indicators are disclosed in Patent Documents 1-3 described below. The heat-sensitive indicator disclosed in Patent Documents 1-3 comprises: a colored hot-meltable substance which melts at a predetermined temperature; an absorber which absorbs or is permeated with the melt of the colored hot-meltable substance; and a separating member such as separating membrane, etc. which maintains the colored hot-meltable substance away from the absorber without contacting to each other. At the time the heat-sensitive indicator is used, the separating member is broken under a temperature lower than the melting temperature of the colored hot-meltable substance, bringing the colored hot-meltable substance into contact with the absorber. When the colored hot-meltable substance is melted by raising the temperature of an environment in which the heat-sensitive indicator is placed, then the molten material of the colored hot-meltable substance is absorbed into the absorber, thereby the absorber is colored. The color on the absorber does not disappear even when the temperature of the environment is brought down to a temperature lower than the melting point of the colored hot-meltable substance.
In electrophotographic and electrostatic printing processes (collectively electrographic processes), an electrostatic image is formed on the surface of a photoreceptive element or dielectric element, respectively. The photoreceptive element or dielectric element may be an intermediate transfer drum or belt or the substrate for the final toned image itself, as described by Schmidt, S. P. and Larson, J. R. in Handbook of Imaging Materials Diamond, A. S., Ed: Marcel Dekker: New York; Chapter 6, pp 227–252, and U.S. Pat. Nos. 4,728,983, 4,321,404, and 4,268,598. In electrostatic printing, a latent image is typically formed by (1) placing a charge image onto a dielectric element (typically the receiving substrate) in selected areas of the element with an electrostatic writing stylus or its equivalent to form a charge image, (2) applying toner to the charge image, and (3) fixing the toned image. An example of this type of process is described in U.S. Pat. No. 5,262,259. In electrophotographic printing, also referred to as xerography, electrophotographic technology is used to produce images on a final image receptor, such as paper, film, or the like. Electrophotographic technology is incorporated into a wide range of equipment including photocopiers, laser printers, facsimile machines, and the like. Electrophotography typically involves the use of a reusable, light sensitive, temporary image receptor, known as a photoreceptor, in the process of producing an electrophotographic image on a final, permanent image receptor. A representative electrophotographic process, discharged area development, involves a series of steps to produce an image on a receptor, including charging, exposure, development, transfer, fusing, cleaning, and erasure. In the charging step, a photoreceptor is substantially uniformly covered with charge of a desired polarity to achieve a first potential, either negative or positive, typically with a corona or charging roller. In the exposure step, an optical system, typically a laser scanner or diode array, forms a latent image by selectively discharging the charged surface of the photoreceptor to achieve a second potential in an imagewise manner corresponding to the desired image to be formed on the final image receptor. In the development step, toner particles of the appropriate polarity are generally brought into contact with the latent image on the photoreceptor, typically using a developer electrically-biased to a potential of the same polarity as the toner polarity and intermediate in potential between the first and second potentials. The toner particles migrate to the photoreceptor and selectively adhere to the latent image via electrostatic forces, forming a toned image on the photoreceptor. In the transfer step, the toned image is transferred from the photoreceptor to the desired final image receptor; an intermediate transfer element is sometimes used to effect transfer of the toned image from the photoreceptor with subsequent transfer of the toned image to a final image receptor. The image may be transferred by physical pressure and contact of the toner, with selective adhesion to a target intermediate or final image receptor as compared to the surface from which it is transferred. Alternatively, the toner may be transferred in a liquid system optionally using an electrostatic assist as discussed in more detail below. 
In the fusing step, the toned image on the final image receptor is heated to soften or melt the toner particles, thereby fusing the toned image to the final receptor. An alternative fusing method involves fixing the toner to the final receptor under pressure with or without heat. In the cleaning step, residual toner remaining on the photoreceptor is removed. Finally, in the erasing step, the photoreceptor charge is reduced to a substantially uniformly low value by exposure to light of a particular wavelength band, thereby removing remnants of the original latent image and preparing the photoreceptor for the next imaging cycle. Two types of toner are in widespread, commercial use: liquid toner and dry toner. The term “dry” does not mean that the dry toner is totally free of any liquid constituents, but connotes that the toner particles do not contain any significant amount of solvent, e.g., typically less than 10 weight percent solvent (generally, dry toner is as dry as is reasonably practical in terms of solvent content), and are capable of carrying a triboelectric charge. This distinguishes dry toner particles from liquid toner particles. A typical liquid toner composition generally includes toner particles suspended or dispersed in a liquid carrier. The liquid carrier is typically nonconductive dispersant, to avoid discharging the latent electrostatic image. Liquid toner particles are generally solvated to some degree in the liquid carrier (or carrier liquid), typically in more than 50 weight percent of a low polarity, low dielectric constant, substantially nonaqueous carrier solvent. Liquid toner particles are generally chemically charged using polar groups that dissociate in the carrier solvent, but do not carry a triboelectric charge while solvated and/or dispersed in the liquid carrier. Liquid toner particles are also typically smaller than dry toner particles. Because of their small particle size, ranging from sub-micron to about 5 microns, liquid toners are capable of producing very high-resolution toned images. A typical toner particle for a liquid toner composition generally comprises a visual enhancement additive (for example, a colored pigment particle) and a polymeric binder. The polymeric binder fulfills functions both during and after the electrophotographic process. With respect to processability, the character of the binder impacts charging and charge stability, flow, and fusing characteristics of the toner particles. These characteristics are important to achieve good performance during development, transfer, and fusing. After an image is formed on the final receptor, the nature of the binder (e.g. glass transition temperature, melt viscosity, molecular weight) and the fusing conditions (e.g. temperature, pressure and fuser configuration) impact durability (e.g. blocking and erasure resistance), adhesion to the receptor, gloss, and the like. In addition to the polymeric binder and the visual enhancement additive, liquid toner compositions can optionally include other additives. For example, charge directors can be added to impart an electrostatic charge on the toner particles. Dispersing agents can be added to provide colloidal stability, aid fixing of the image, and provide charged or charging sites for the particle surface. 
Dispersing agents are commonly added to liquid toner compositions because toner particle concentrations are high (inter-particle distances are small) and electrical double-layer effects alone will not adequately stabilize the dispersion with respect to aggregation or agglomeration. Release agents can also be used to help prevent the toner from sticking to fuser rolls when those are used. Other additives include antioxidants, ultraviolet stabilizers, fungicides, bactericides, flow control agents, and the like. U.S. Pat. No. 4,547,449 to Alexandrovich, et al. discloses liquid electrographic developers comprising an electrically insulating liquid carrier, toner, a charge-control agent and a charging agent. The charge-control agent is a carrier-soluble, addition copolymer of a quaternary ammonium salt monomer, a monomer having —COOH, —SO3H or —PO3HR acidic function wherein R is hydrogen or alkyl, and a solubilizing monomer. The charging agent is a carrier-soluble, addition polar copolymer. The disclosed developers are stated to exhibit improved replenishability as evidenced by reduced buildup of charge in the developers during the course of use and repeated replenishment. Specifically, this patent noted that the prior art exhibited drawbacks relating to the stability of their charge as they are used through a number of copy sequences. In particular, the charge of the developer per unit of mass of dispersed toner of the prior art increases, indicating that the quaternary ammonium charge-control copolymer deposits on an electrostatic image at a lower rate than the toner. This uneven depletion rate and consequential increase in charge per unit mass in the developer presents difficulty in developer replenishment and causes nonuniform image density from copy to copy. The invention as described therein is asserted to stabilize the charge of the developer per unit mass of toner is so that, after a period of use, the buildup of charge per unit of mass is significantly reduced. Such stability is stated to be achieved when the quaternary ammonium salt charge-control polymer in the developer composition contains an insolubilizing monomer having an acidic function selected from the group consisting of —COOH, —SO3H or —PO3HR acidic function wherein R is hydrogen or alkyl. Charge directors, including certain quaternary ammonium salts, are disclosed in Beyer, U.S. Pat. No. 3,417,019 and Tsuneda, U.S. Pat. No. 3,977,983 for liquid developers. U.S. Pat. No. 5,627,002 to Pan, et al. discloses a positively charged liquid developer comprised of a nonpolar liquid, thermoplastic resin particles, pigment, a charge control agent, and a charge director comprised of a cyclodextrin or a cyclodextrin derivative containing one or more organic basic amino groups. This patent states that the hollow interiors provide these cyclic molecules with the ability to complex and contain, or trap a number of molecules or ions, such as positively charged ions like benzene ring containing hydrophobic cations, which are known to insert themselves into the cyclodextrin cavities. U.S. Pat. No. 5,411,834 to Fuller discloses a negatively charged liquid developer comprised of thermoplastic resin particles, optional pigment, a charge director, and an insoluble charge adjuvant comprised of a copolymer of an alkene and an unsaturated acid derivative. The acid derivative contains pendant fluoroalkyl or pendant fluoroaryl groups, and the charge adjuvant is associated with or combined with said resin and said optional pigment. 
In certain embodiments, it is stated that “it is important that the thermoplastic resin, copolymers with pendant fluorinated groups as illustrated herein, and the optional second charge adjuvant be sufficiently compatible that they do not form separate particles, and that the charge adjuvant be insoluble in the hydrocarbon to the extent that no more than 0.1 weight percent be soluble in the nonpolar liquid.” See column 8, lines 44–50. U.S. Pat. No. 6,018,636 to Caruthers discloses an imaging system wherein changes in toner developability of toners in a liquid toner system are determined and compensated for by sensing the toner concentration and liquid toner volume in a tank, based on changes in the toner concentration and toner mass in the tank. Based on measurements made of the toner and/or a test printed image, adjustments can be made, such as creating a new voltage differential or adding toner and/or liquid carrier material to the tank. U.S. Pat. No. 5,722,017 to Caruthers discloses a liquid developing material replenishment system wherein an apparatus for developing an electrostatic latent image with a liquid developing material includes a liquid developing reservoir for providing a supply of operative liquid developing material to the developing apparatus, and a liquid developing material supply is coupled to the liquid developing material reservoir for providing a supply of liquid developing concentrate to the liquid developing material reservoir for replenishing the supply of operative liquid developing material in the liquid developing reservoir. A developed image having a large proportion of printed image area or having substantially a single color will cause a greater depletion of marking particles and/or charge director in the liquid developing material supply tank as compared to a developed image with a small amount of printed image area or of a single color. This patent explains that while the rate of the replenishment of the liquid developing material may be controlled by simply monitoring the level of liquid developer in the supply reservoir 116, in advanced systems the rate of replenishment of the liquid carrier, the marking particles, and/or the charge director components of the liquid developing material is controlled in a more sophisticated manner to maintain a predetermined concentration of the marking particles and the charge director in the operative solution stored in the supply reservoir 116. One exemplary replenishment systems of this nature include systems which measure the conductivity of the operative liquid developing material and add selective amounts of charge director compound to the reservoir as a function of the measured a conductivity, as disclosed in detail in U.S. Pat. No. 4,860,924, incorporated by reference herein. Another system of this nature is disclosed in commonly assigned U.S. patent application Ser. No. 08/551,381, also incorporated by reference herein, which describes control of the amount of carrier liquid, charge director and/or marking particles in a liquid developing material reservoir in response to the amount of each component depleted therefrom as a function of the number of pixels making up each developed image.See column 14, line 48 to column 15, line 3. U.S. Pat. No. 4,860,924 to Simms, et. al. discloses a copier wherein charge director is supplied to a liquid developer in response to a conductivity measurement thereof. 
Toner concentrate deficient in charge director is supplied to the liquid developer in response to an optical transmissivity measurement thereof. Conductivity is measured by a pair of spaced electrodes immersed in the developer and through which a variable alternating current is passed. A variable capacitor neutralizes the inherent capacitance of the electrodes. A phase sensitive detector is provided with a reference voltage having the same phase shift as that caused by capacitive effects. The conductivity measurement is corrected in response to a developer temperature measurement. U.S. Pat. No. 4,935,328 to El-Sayed discloses an electrostatic liquid developer stated to have improved negative charging characteristics consisting essentially of (A) nonpolar liquid having a Kauri-butanol value of less than 30, present in a major amount, (B) thermoplastic resin particles having an average by area particle size of less than 10 μm, (C) charge director compound, and (D) at least one soluble solid or liquid organic monofunctional amine compound of the formula: Rn NH3-n wherein R is alkyl, cycloalkyl or alkylene, or substituted alkyl, the alkyl, cycloalkyl, alkylene or substituted alkyl group being of 1 to 50 carbon atoms, and n is an integer of 1 to 3. The electrostatic liquid developer is useful in copying, making proofs including digital color proofs, lithographic printing plates, and resists.
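The replenishment schemes summarized in the preceding paragraphs, adding charge director as a function of measured conductivity and toner concentrate as a function of optical transmissivity, amount to simple feedback loops. Purely as an illustrative sketch of that idea (not the algorithm or circuitry of any cited patent), such a loop might be expressed as below; the class name, setpoints, and dosing gains are assumptions.

// Illustrative sketch of a conductivity- and transmissivity-driven replenishment loop.
// Names, setpoints, and gains are assumptions; no cited patent discloses this code.
public class ReplenishmentController {
    private final double conductivitySetpoint;   // target developer conductivity
    private final double transmissivitySetpoint; // target optical transmissivity (proxy for toner concentration)
    private final double directorGain;           // volume of charge director per unit of conductivity error
    private final double concentrateGain;        // volume of toner concentrate per unit of transmissivity error

    public ReplenishmentController(double conductivitySetpoint, double transmissivitySetpoint,
                                   double directorGain, double concentrateGain) {
        this.conductivitySetpoint = conductivitySetpoint;
        this.transmissivitySetpoint = transmissivitySetpoint;
        this.directorGain = directorGain;
        this.concentrateGain = concentrateGain;
    }

    /** Volume of charge director to dose when measured conductivity falls below the setpoint. */
    public double chargeDirectorDose(double measuredConductivity) {
        double error = conductivitySetpoint - measuredConductivity;
        return Math.max(0.0, directorGain * error); // never dose a negative volume
    }

    /** Volume of toner concentrate to dose when transmissivity rises above the setpoint (developer too dilute). */
    public double tonerConcentrateDose(double measuredTransmissivity) {
        double error = measuredTransmissivity - transmissivitySetpoint;
        return Math.max(0.0, concentrateGain * error);
    }
}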
Q: Making TodoList in React, how to cross out item when finished (not using index)? I'm new to React and learning how to make a Todo App. I need to know how to cross out a todo once the checkbox input is clicked, and uncross it once clicked again (toggle), in a controlled environment. I've put in my research; the thing is, everyone seems to be using the index, while I have heard not to use the index as it leads to problems. Any help would be greatly appreciated; here's my code below:

App (parent component)

// dependencies
import React from 'react';

// local files
import './App.css';
import List from '../components/List';

class App extends React.Component {
  // state
  state = {
    todos: [
      { key: 0, text: "Go shopping", isCompleted: false },
      { key: 1, text: "Lift", isCompleted: false },
      { key: 2, text: "Go for walk", isCompleted: false }
    ],
    currentTodo: {
      key: "",
      text: "",
      isCompleted: false
    }
  };

  // event handlers

  // JSX
  render() {
    return (
      <div className="App">
        <div className="title">
          TodoList
        </div>
        <List todos={this.state.todos} />
      </div>
    );
  }
}

export default App;

List Component:

// dependencies
import React from 'react';

// local files

const List = (props) => {
  const TodoList = props.todos.map((todo) => {
    return (
      <div className="TodoItem" key={todo.key}>
        {todo.key} {todo.text}
        <input type="checkbox" />
      </div>
    );
  });

  return (
    <div className="Show-List">
      {TodoList}
    </div>
  );
};

export default List;

A: For the update operation you can use the index; there is no issue. A unique key is required for a list of items so that React can identify which items have changed: "Keys help React identify which items have changed, are added, or are removed. Keys should be given to the elements inside the array to give the elements a stable identity" (Ref). Hope the code snippet below helps you:

const TodoList = props.todos.map((todo, index) => {
  return (
    // index is not a good option here; you should use the unique key only
    <div className="TodoItem" key={todo.key}>
      {todo.key} {todo.text}
      {/* but here you can use the index, so you can update the todo easily,
          as you are just using it to get access to a specific todo */}
      <input
        type="checkbox"
        checked={todo.isCompleted}
        onChange={() => props.handleCheck(index)}
      />
    </div>
  );
});

WORKING DEMO:
1.. Introduction {#S0001} ================

β-Cyclodextrin (β-CD) is produced from starch by enzymatic conversion \[[1](#CIT0001)\] and is made up of seven glucose molecules bonded together to form a ring \[[2](#CIT0002)\]. It can form inclusion complexes with hydrophobic drugs in its inner cavity, which can increase their solubility in water. However, the solubility of β-CD itself is very low, only 1.6 mmol/L \[[3](#CIT0003),[4](#CIT0004)\]. The low water solubility of β-CD limits its application as an embedding medium for hydrophobic drugs \[[5](#CIT0005)--[7](#CIT0007)\], so improving the water solubility of β-CD has become one of the research highlights in β-CD modification. Preparing a water-soluble β-CD polymer is one way to increase β-CD solubility. Our former research reported that a β-cyclodextrin-acrylamide (CDM-AM) copolymer was synthesized from β-CD maleate (CDM) and acrylamide using potassium persulfate (K~2~S~2~O~8~) as the initiator. The copolymer could increase the solubility of methyl 2-benzimidazolecarbamate (MBC) and natamycin (NM), and the stability constants of the MBC·CDM-AM and NM·CDM-AM complexes were 3000.89 M^−1^ and 10,725.45 M^−1^, respectively \[[8](#CIT0008)\]. However, that method requires the addition of a chemical initiator. In this research, γ-ray irradiation was used to initiate the copolymerization. Radiation polymerization is one of the important research fields in radiation chemistry. Compared with traditional polymerization methods, it has many advantages for improving polymer performance, such as a high grafting rate, no harmful chemical residue, and no selectivity toward the substrate. Theoretically, radiation polymerization can be applied to any monomer system \[[9](#CIT0009)\]. Irradiation polymerization has been used to prepare carbohydrate hydrogels, whose swelling and phenol-absorbing properties have also been studied \[[10](#CIT0010)\]. However, irradiation polymerization has not previously been reported for the preparation of cyclodextrin polymers. The aim of this work was to synthesize a water-soluble cyclodextrin copolymer by radical polymerization initiated by γ-ray and to form inclusion complexes with the hydrophobic drugs NM and MBC. The differences between CDM-AM copolymers synthesized by irradiation and by a chemical initiator were also compared in this article. The synthetic complexes were systematically characterized using size exclusion chromatography (SEC), phase solubility diagrams, NMR spectra, and FT-IR spectra. Additionally, the water solubility and fungicidal activity of NM, MBC, NM·CDM-AM and MBC·CDM-AM were investigated.

2.. Experimental {#S0002} ================ 2.1. Materials {#S0002-S2001} -------------- Maleic acid (MA) was purchased from Aladdin Industrial Corporation. NM and MBC were purchased from TCI. Sodium hypophosphite monohydrate (SHP), β-CD, 4-methoxyphenol, and acrylamide (AM) were purchased from China National Pharmaceutical Group Corporation. Dextran standard substance (relative molecular mass of 40,000) was purchased from Sigma-Aldrich (USA). All reagents were analytically pure unless otherwise noted. 2.2. Methods {#S0002-S2002} ------------ ### 2.2.1. Preparation of the CDM-AM copolymer {#S0002-S2002-S3001} 1. \(1\) Synthesis of CDM CDM was prepared via the semi-dry reaction method reported in our former research \[[11](#CIT0011)\]. β-CD (6 mmol) was mixed with maleic acid (24 mmol), SHP (6 mmol) and 4-methoxyphenol (0.6 mmol) in a pressure bottle.
SHP was the catalyst, and 4-methoxyphenol was polymerization inhibitor to protect the double bond. Then certain amount of water was added in the pressure bottle, and the M/L mass ratio was 1:0.6. The pressure bottle was placed in a circulating air oven at 110 ^◦^C for3.5 h. The reaction mixture was cooled down to room temperature and crystallized at 4 ^◦^C in a refrigerator, and the crystals were purified by washing with 95% ethyl alcohol, followed by drying at 60 ^◦^C for 24 h. 1. \(2\) CDM-AM copolymer synthesized by irradiation CDM-AM was synthesized using γ-ray irradiation. The reaction mechanism for the synthesis of CDM-AM was shown as below: Previous research indicates that when the hydrone is irradiated by γ-ray, molecules accept the energy of radiation and then are ionized and excited to generate the hydrone ions free radical (H~2~O^+^) and hydrone molecules free radical (H~2~O·). Meanwhile, the H~2~O^+^ reacts with other molecules to generated H~3~O^+^ and hydroxyl radical (OH·). There is also a part of the water molecules directly ionized and decomposed into hydrogen radical (H·) and hydroxyl radical (OH·) \[[12](#CIT0012)\]. Therefore, the result of radiolysis reaction process of water molecular is the formation of three free radicals: H·, OH· and eaq^−^. OH· is a strong oxidant, which could induce the polymerization of vinyl monomer. The genenrated hydroxyl radical (OH·) attack the vinyl group of CDM and AM to initiate vinyl polymerization. 10.1080/15685551.2018.1480681-UF0001 Subsequent addition of monomer molecules to the initiated chain ultimately leads to the formation of the CDM-AM copolymer. 10.1080/15685551.2018.1480681-UF0002 10.1080/15685551.2018.1480681-UF0003 Polymerization of AM also occurs as a side reaction. Due to the steric hindrance of the β-CD ring in the CDM molecule, CDM cannot polymerize with itself. 10.1080/15685551.2018.1480681-UF0004 Thus, these experiments are designed to discover the major factors controlling the magnitude of polymerization. The synthetic steps leading to the formation of the CDM-AM copolymer are shown as follows: 1. a\) A specific amount of CDM (3.6 g) and AM (3.6 g) was weighed and placed in 50 mL conical flasks with a stopper. 10 mL of double distilled water (deoxygenated by purging through nitrogen gas) was added to the flask. The bottle of reaction mixture was handled with ^60^Co γ-ray radiation to start the radiation copolymerization (Irradiation was conducted at the institute of agricultural products processing irradiation center, Chinese academy of agricultural sciences), the temperature was 25 ^◦^C, dose range was from 2 to 10 kGy, and the dose rate was 0.5 kGy/h. 2. b\) After reaction completion, the reaction mixture was transferred to a beaker. The reaction flask was washed three times with 5 mL of double distilled water, and the polymer was precipitated by slowly adding 80 mL of ethanol to the beaker. The solution was then stirred until it turned clear. The supernatant was discarded and the sticky solid particles were collected and washed three times using 30 mL of anhydrous ethanol. The filter cake was collected and dried at 60°C in an oven for 6 h and weighed. Polyacrylamide (without CDM) was prepared using the same method described above. ### 2.2.2. 
The product yield and molecular weight of the CDM-AM copolymer {#S0002-S2002-S3002} The yield of product (YP) was calculated as follows: $$YP(\%) = \frac{W_{0}}{W_{1} + W_{2}} \times 100\%$$ where W~0~ is the weight of the polymer and W~1~ + W~2~ represents the added amounts of CDM and AM, respectively. The molecular weight of the CDM-AM copolymer was determined by combining a multi-angle laser light scattering instrument with size exclusion chromatography (MALLS-SEC, L-2130, HITACHI, Japan) and the method of molar mass calculation was based on MALLS with dn/dc, using dextran as the standard. The CDM-AM copolymer (1 mg/mL) was dissolved by moving phase (0.1 mol/L NaNO~3~ aqueous solution containing 0.2‰ NaN~3~) and filtered using a 0.45 μm membrane before SEC injection. The injection volumes were 200 μL, the UV detector wavelength was 280 nm, the differential refractometer detector wavelength was 690 nm and the flow rate was 0.50 mL/min. ### 2.2.3. Preparation of the inclusion complex with CDM-AM copolymer {#S0002-S2002-S3003} MBC (0.0478 g, 0.25 mmol) and NM (0.1664 g, 0.25 mmol) were mixed with the CDM-AM copolymer (1.013 g, M~w~ = 43,300), respectively, before adding 20 mL of double distilled water. The reaction mixture was then shaken at a speed of 120 r/min for 3 days. The turbid liquid was filtered through a 0.45 μm hydrophilic membrane filter using a syringe. Lastly, the filtrate was dried in a vacuum freeze drier. ### 2.2.4. Phase solubility studies {#S0002-S2002-S3004} The phase solubility studies were performed according to a method reported by Higuchi and Connors \[[13](#CIT0013)\]. NM (50 mg) and MBC (10 mg) were added in excess to aqueous solutions (10 mL) containing different concentrations of CDM-AM copolymer (0.23, 0.46, 0.69, 0.92, and 1.15 mmol/L). The weight-average molecular weight (M~w~) of CDM-AM copolymer was 43, 300. The stoppered conical flasks were sealed with a plastic film to avoid water evaporation and then shaken at 303 K, 313 K, and 323 K for 3 days. After equilibrium was established, the suspensions were filtered through a 0.45 μm hydrophilic membrane filter by using a syringe. The resulting filtrates were diluted and the concentrations of NM and MBC were analyzed at 303 and 281 nm using a UV-Vis spectrophotometer (UV-1800, SHIMADZU, Japan). The linear regression equations used to determine the concentrations of NM and MBC were as follows: A~NM~ = 0.0546 C (mg/mL) + 0.0031 (R^2^ = 0.9992) A~MBC~ = 0.0967 C (mg/mL) + 0.0003 (R^2^ = 0.9999) The apparent stability constant Kc was calculated from the linear line obtained from the phase solubility diagram. $$K_{c} = \frac{S{lope}}{S_{0}{(1 -}S{lope)}}$$ where S~0~ is the intrinsic solubility of NM and MBC in redistilled water in the absence of the CDM-AM copolymer. ### 2.2.5. UV spectroscopy {#S0002-S2002-S3005} The aqueous solution concentration of CDM-AM, acrylamide and β-cyclodextrin maleate (CDM) were 1 mg/ml, 2 μg/ml and 5 μg/ml, respectively. The UV spectroscopy was detected by using ultraviolet-visible spectrophotometer (UV -- 1800, Shimadzu, Japan), interval wavelength of scanning was 0.1 nm, and scanning speed was fast mode. ### 2.2.6. NMR spectroscopy {#S0002-S2002-S3006} ^1^H NMR spectra of CDM, NM, MBC, CDM-AM copolymer and the complexes of NM·CDM-AM and MBC·CDM-AM were collected at 25°C by a Bruker-500 spectrometer (AVANCE III, Bruker, Switzerland). All NMR samples were prepared in D~2~O, with the exception of NM and MBC, which were prepared in deuterated methanol. ### 2.2.7. 
FT-IR {#S0002-S2002-S3007} FT-IR spectra were collected using an FT-IR spectrometer (Tensor-37, BRUKER, Germany). Specifically, the sample was ground with KBr (about 200--400 mg) into a fine powder, placed into the sampling cup, smoothed, and compressed into a transparent flake using a tablet machine. At this point, the sample was placed in the beam path and the FT-IR spectrum was obtained. ### 2.2.8. Thermal analysis {#S0002-S2002-S3008} Thermo-gravimetric analysis was performed using a TG/DTA thermal analyzer (Pyris-115, Perkin Elmer, USA) with the following experimental conditions: nitrogen atmosphere (25 mL/min), 10°C/min heating rate, and a scanning temperature range from 40°C to 450°C. ### 2.2.9. Scanning electron microscopy {#S0002-S2002-S3009} A scanning electron microscope (SEM) examination was carried out by mounting the CDM-AM copolymer samples on stubs with double-sided adhesive tape and coating them with gold in an S150A sputter coater unit (Edwards, UK) to a gold film thickness of 150 Å, and then viewing them with a JEOL JSM-6700F electron probe micro-analyzer. ### 2.2.10. Bioassay of fungicidal activity {#S0002-S2002-S3010} The fungicidal activities of NM, MBC, NM·CDM-AM and MBC·CDM-AM against A. niger (A. niger 04523 was purchased from the Institute of Microbiology, Chinese Academy of Sciences) were determined using the Czapek Dox Agar method \[[8](#CIT0008)\]. Equilibrium turbid liquids prepared at 303 K in [section 2.2.2](#S0002-S2002-S3002) were filtered through a 0.45 μm hydrophilic membrane filter. About 15 μL of each filtrate of the inclusion complexes NM·CDM-AM and MBC·CDM-AM was added to paper disks, respectively. Within 5 min, the drug-loaded paper disks (6 mm in diameter) were placed on an inoculated plate and incubated overnight at 30°C. The fungicidal activity was determined by the size of the zone of clearance, which was measured with calipers across an average diameter. ### 2.2.11. The determination of residual acrylamide monomer in the CDM-AM copolymer {#S0002-S2002-S3011} An acrylamide standard solution (1 mg/mL) was serially diluted to final acrylamide concentrations of 0.25, 0.50, 1.00, 5.00, and 10.00 μg/mL, and the dilutions were analyzed under the selected HPLC conditions. The standard curve of acrylamide was drawn with concentration as the abscissa and peak area as the ordinate. An adequate amount of the CDM-AM polymer was weighed accurately, dissolved in 10% (volume fraction) acetonitrile aqueous solution, and made up to a concentration of 1 mg/mL. The sample was filtered through a 0.45 μm microporous membrane and analyzed under the selected HPLC conditions. Chromatographic conditions were as follows: the stationary phase was an Innoval C18 column (50 mm × 4.6 mm, 5 μm) from Agela Technologies. The mobile phase was a mixture of acetonitrile (A, 15%) and water (B, 85%). The flow rate was 1.0 mL/min and the injection volume was 10.00 μL. The UV detector wavelength was 200 nm and the column temperature was 25°C.

3.. Results and discussion {#S0003} ========================== 3.1. Synthesis of the CDM-AM copolymer {#S0003-S2001} -------------------------------------- The gel permeation chromatogram (GPC) of the CDM-AM copolymer is shown in [Figure 1b](#F0001). Two peaks were observed before the solvent peak, and only the second peak (peak 2) demonstrated ultraviolet absorbance. The AM homopolymer is known to have greater amounts of amide bonds, and the literature shows that amide bonds have an ultraviolet absorption peak at 280 nm.
The GPC of the AM homopolymer also emphasized this point (shown in [Figure 1c](#F0001)). Based on this, it was determined that peak 2 was the homopolymer of AM. In general, carbohydrates do not absorb, or only weakly absorb, ultraviolet light at 280 nm, similar to the dextran standard ([Figure 1a](#F0001)). Thus, peak 1 was assigned as the CDM-AM copolymer. The GPC of the synthetic CDM-AM copolymer ([Figure 1b](#F0001)) also proved that the CDM-AM copolymer was successfully synthesized, with the polymerization of AM occurring as a side reaction during the synthesis.

Figure 1. Gel permeation chromatography (GPC) of the dextran standard (a), the CDM-AM copolymer (b), and the AM homopolymer (c), and the factors affecting the preparation of the CDM-AM copolymer (d-f).

In this study, the weight-average molar mass of the CDM-AM copolymer increased as AM was consumed in the reaction, and the yield of the CDM-AM copolymer became stable when the CDM:AM mass ratio was 1:1 ([Figure 1d](#F0001)). The irradiation dose had a significant influence on the weight-average molar mass and yield of the CDM-AM copolymer and homopolymer. The yield of CDM-AM increased with irradiation dose; however, the weight-average molar mass decreased with irradiation dose ([Figure 1e](#F0001)). This result was due to the faster generation of free radicals at higher irradiation doses, leading to an increase in the rate of the termination reaction. The dosage of DMF solution had no significant influence on the weight-average molar mass of the CDM-AM copolymer, but the product yield decreased as the dosage of DMF solution increased ([Figure 1f](#F0001)). This result was due to the self-polymerization of the acrylamide monomer. These results indicate that the optimized preparation conditions for the CDM-AM copolymer are as follows: a CDM:AM mass ratio of 1:1, an irradiation dose of 4 kGy, and 20 mL of DMF aqueous solution. The yield of CDM-AM was 75% (by weight) under these synthetic conditions.

3.2. Phase solubility studies {#S0003-S2002} ----------------------------- Phase solubility diagrams have been used extensively to investigate the solubility of particular drugs and agrochemicals in the presence of CDs \[[14](#CIT0014)\]. The phase solubility diagrams of NM and MBC in the presence of CDM-AM copolymers are presented in [Figure 2](#F0002).

Figure 2. Phase solubility diagrams of NM (a) and MBC (b) in the presence of CDM-AM copolymers.

The solubility of NM and MBC increased linearly with increasing concentrations of CDM-AM. Thus, the phase solubility diagrams of NM·CDM-AM and MBC·CDM-AM could be classified as type A~L~ \[[13](#CIT0013)\], the same as for the CDM-AM complexes synthesized by the chemical method. The values of the apparent stability constant K~c~, calculated according to [Eq. 2](#M0002), are shown in [Table 1](#T0001). Compared with the water solubility of free NM and MBC, the solubility of NM and MBC increased 8.5-fold and 4.7-fold, respectively, in the presence of 1.15 mmol/L CDM-AM at 303 K. The concentrations of NM and MBC in CDM-AM copolymer aqueous solution also increased with temperature ([Table 1](#T0001)). However, their solubility in CDM-AM synthesized by irradiation was lower than in the copolymer synthesized with a chemical initiator. This was because γ-rays can degrade the cyclodextrin part of the CDM-AM copolymer.
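The reported enhancement can be cross-checked against [Eq. 2](#M0002) and the 303 K values for NM listed in [Table 1](#T0001) below (K~c~ = 13,446 M^−1^, S~0~ = 0.0887 mmol/L); the following is an illustrative, rounded back-calculation rather than a figure reported by the authors:

$$\mathrm{slope} = \frac{K_{c}S_{0}}{1 + K_{c}S_{0}} \approx \frac{13446\ \mathrm{M^{-1}} \times 8.87 \times 10^{-5}\ \mathrm{M}}{1 + 1.19} \approx 0.54$$

$$S_{t} = S_{0} + \mathrm{slope} \times [\mathrm{CDM\text{-}AM}] \approx 0.0887 + 0.54 \times 1.15 \approx 0.71\ \mathrm{mmol/L} \approx 8\,S_{0},$$

which is of the same order as the roughly 8.5-fold solubility increase quoted above.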
According to the apparent stability constants shown in [Table 1](#T0001), the NM·CDM-AM complex was more stable than the MBC·CDM-AM complex in aqueous solution. However, the K~c~ values of the NM·CDM-AM and MBC·CDM-AM complexes decreased as the temperature was increased, while the S~0~ values of NM and MBC increased with temperature. In addition, the K~c~ of NM was higher than that of MBC at every temperature ([Table 1](#T0001)), indicating that the NM·CDM-AM complex was more stable than the MBC·CDM-AM complex. Because of its larger molecular size compared with MBC, NM fit the β-CD ring of the CDM-AM copolymer better. These results suggested that the CDM-AM copolymer is a good biosorbent for NM and MBC from aqueous solutions and could be used as a drug carrier for NM and MBC.

Table 1. The apparent stability constant K~c~ and intrinsic solubility S~0~ of the NM·CDM-AM complex and the MBC·CDM-AM complex at different temperatures.

| | 303 K | 313 K | 323 K |
|---|---|---|---|
| K~c~ of NM (M^−1^) | 13,446.06 | 8672.00 | 7840.49 |
| S~0~ of NM (mmol/L) | 0.0887 | 0.144 | 0.206 |
| K~c~ of MBC (M^−1^) | 2595.30 | 2028.19 | 1429.51 |
| S~0~ of MBC (mmol/L) | 0.0479 | 0.0604 | 0.0794 |

3.3. Characterization of CDM-AM copolymer and complexes with NM and MBC {#S0003-S2003} ----------------------------------------------------------------------- 1. \(1\) UV spectra CDM and acrylamide had absorption peaks at wavelengths of 210 nm and 205 nm, respectively. There was no ultraviolet absorption peak in the UV spectrum of the CDM-AM polymer at the wavelengths mentioned above ([Figure 3a](#F0003)), which indicated that intermolecular polymerization between CDM and acrylamide had occurred, because the double bonds in CDM and acrylamide had been consumed. The UV spectra thus confirmed that the CDM-AM polymer was synthesized. The ultraviolet spectra of carbendazim (MBC) and MBC·CDM-AM are shown in [Figure 3b](#F0003). MBC had its maximum ultraviolet absorption at 274.6 nm, whereas the maximum absorption of the MBC·CDM-AM complex was red-shifted to 275.6 nm. A similar phenomenon was observed in the UV spectra of NM and the NM·CDM-AM complex: the maximum ultraviolet absorption of NM was at 303.5 nm, while that of the NM·CDM-AM complex was red-shifted to 305 nm ([Figure 3c](#F0003)). The UV spectra indicated that CDM-AM could form complexes with MBC and NM.

Figure 3. UV spectra of CDM, CDM-AM, and acrylamide (a), MBC and MBC·CDM-AM (b), and NM and NM·CDM-AM (c); FT-IR spectra of polyacrylamide and the CDM-AM copolymer (d); FT-IR spectra of the inclusion complexes MBC·CDM-AM and NM·CDM-AM (e).

1. \(2\) FT-IR spectra The FT-IR spectra of polyacrylamide and the CDM-AM copolymer are presented in [Figure 3d](#F0003). For the CDM-AM copolymer, absorption bands were observed around 3201 (ν NH~2~), 1658 (ν C = O), and 1560 cm^−1^ (δ NH~2~), as well as 1154, 1081, and 1044 cm^−1^ (ν C-OH of cyclodextrin) ([Figure 3d](#F0003)). The absorption peak at 1642 cm^−1^ for the stretching vibration of the alkene (C = C) bond in CDM also disappeared \[[8](#CIT0008)\]. These results indicate that the CDM-AM copolymer was synthesized. For polyacrylamide, a peak at 1124 cm^−1^ (corresponding to the C-N-C stretch, [Figure 3d](#F0003)) was observed, suggesting that there is intermolecular cross-linking in the polyacrylamide molecule. The absence of any signal at 1124 cm^−1^ (corresponding to the C-N-C stretch) indicates that the CDM-AM molecule could be a linear high-molecular polymer.
The absorption peaks at 2865 and 2780 cm^−1^ also disappeared in the spectrum for the CDM-AM copolymer compared to the polyacrylamide spectrum, further confirming the successful synthesis of the CDM-AM copolymer. The variation in the shape, shift, and intensity of the FT-IR absorption peaks for the guest or host can provide enough evidence for inclusion \[[15](#CIT0015)\]. The FT-IR spectra of the inclusion complex MBC·CDM-AM and the inclusion complex NM·CDM-AM were presented in [Figure3e](#F0003). In the FT-IR spectrum of the MBC·CDM-AM complex, there were characteristic absorption peak of MBC during 1250 cm^−1^ and 1100 cm^−1^. However, the band corresponding to the C = O stretching vibration of the ester at 1712 cm^−1^ disappears in the complex. The band at 1044 cm^−1^, corresponding to the C-OH stretching vibration of the cyclodextrin, shifted to 1030 cm^−1^. Meanwhile, the intensities of the C-H bending vibration of the benzene ring and conjugated double bond at 732 cm^−1^ increased, and the NH of the secondary amide at 1475 cm^−1^ was decreased \[[8](#CIT0008)\]. Therefore, the FT-IR spectra confirm the formation of the inclusion complex, specifically with the benzene ring of MBC included into the CDM-AM cavity. Moreover, In the FT-IR spectrum of the NM·CDM-AM complex ([Figure 3e](#F0003)), the band corresponding to the C = O stretching vibration of the ester in NM at 1715 cm^−1^, also disappeared in the complex \[[8](#CIT0008)\]. The band corresponding to the C-OH stretching vibration of cyclodextrin at 1044 cm^−1^ shifted to 1030 cm^−1^. Therefore, FT-IR spectra confirm that the inclusion complex NM·CDM-AM was formed and that NM was included into the CDM-AM cavity. 1. \(3\) NMR spectra When comparing the ^1^H NMR spectra for NM, the CDM-AM copolymer and their complex, the 1H chemical shift of the conjugated double bond in NM was divided from one multiplet peak to several singlet peaks between 6.0 ppm and 6.5 ppm ([Figure 4d](#F0004)). A doublet corresponding to the carboxylic acid group at 6.74 ppm (11-H) was also observed. Peaks corresponding to the vinyl proton of the unsaturated ester at 6.48 ppm and 6.39 pm ([Figure4b](#F0004)) also appear in the 1H spectrum for the NM·CDM-AM complex ([Figure 4b](#F0004)). These results suggest that the chemical shifts of the two H atoms in the vinyl group become divided when the NM·CDM-AM complex is formed. In summary, we determined that the NM·CDM-AM complex was successfully formed. 10.1080/15685551.2018.1480681-F0004Figure 4.^1^H NMR spectrum of the CDM-AM copolymer (a); ^1^H NMR spectra of the CDM-AM copolymer and its complexes with NM and MBC (b); ^1^H NMR spectrum and molecular structure of MBC (c); ^1^H NMR spectrum and molecular structure of NM(d). Similar to NM, we found that the 1H chemical shifts of the benzene rings in MBC also shifted from high field to low field in the MBC·CDM-AM complex. The ^1^H chemical shifts of the benzene rings in MBC were observed at 7.13 ppm (2-H and 3-H) and 7.40 ppm (1-H and 4-H) ([Figure 4c](#F0004)), while the 1H chemical shifts for the benzene rings of the MBC·CDM-AM complex were observed at 7.44 ppm (1-H and 4-H) and 7.27 ppm (2-H and 3-H) ([Figure 4a](#F0004)). The peak pattern of 2-H and 3-H in the benzene ring of MBC also changed, indicating that the active site of MBC within the CD ring was located at the 2-H and 3-H atoms in the benzene ring of MBC. These results also suggest that the MBC·CDM-AM complex was successfully formed. 
In conclusion, MBC and NM were both able to form inclusion complexes with the CDM-AM copolymer.

3.4. Thermo-gravimetric analysis (TGA) and SEM analysis {#S0003-S2004} ------------------------------------------------------- Thermo-gravimetric analysis (TGA) was carried out to determine the thermal properties and stability of the new biomaterials. [Figure 5](#F0005) shows the thermo-gravimetric (TG) curves and the first derivative TG (DTG) traces of the CDM-AM copolymer and its complexes with NM ([Figure 5a](#F0005)) and MBC ([Figure 5b](#F0005)). The TG curves show that the thermal stability of the NM·CDM-AM complex was higher than that of free NM. The initial degradation temperature of NM embedded in the CDM-AM copolymer was 219.8 °C, whereas that of free NM was 204.1 °C ([Figure 5a](#F0005)). The initial degradation temperature of MBC embedded in the CDM-AM copolymer was 273.1 °C, whereas that of free MBC was 247.6 °C ([Figure 5a](#F0005)). Based on the DTG curves of the drug complexes with the CDM-AM copolymer, the maximum weight losses for the NM·CDM-AM complex and the MBC·CDM-AM complex were 57.7 μg/min at 175 °C and 58 μg/min at 273.1 °C, respectively. The TGA results also provide further proof that NM and MBC can form complexes with the CDM-AM copolymer and that the cyclodextrin ring of the CDM-AM copolymer can protect the drugs embedded in it.

Figure 5. TGA curves of CDM and the CDM-AM copolymer (a: TG and DTG of the CDM-AM copolymer and its complex with NM; b: TG and DTG of the CDM-AM copolymer and its complex with MBC) and SEM picture of the CDM-AM copolymer (c).

According to the scanning electron microscopy (SEM) image of the CDM-AM polymer, the structure of the CDM-AM copolymer was porous (as shown in [Figure 5c](#F0005)), which is obviously different from the crystalline structures of CDM and acrylamide; this can be taken as one piece of evidence for the synthesis of the CDM-AM polymer. At the same time, the cellular structure of the polymer allowed water molecules to enter its interior, which could increase the dissolution rate of the polymer in water. This is why the CDM-AM copolymer had good solubility in water.

3.5. Fungicidal activity {#S0003-S2005} ------------------------ NM and MBC were chosen as research objects based on their wide application and effectiveness in the fields of agriculture and food storage. The zones of inhibition were defined as the maximum distances between the test disk and the fungal growth edge. The zones of inhibition for NM·CDM-AM (a1-a5) and for MBC·CDM-AM (b1-b5) are shown in [Figure 6](#F0006). According to the inhibition zones of the complexes, the growth of hyphae was significantly inhibited, because the CDM-AM copolymer significantly improved the water solubility and the bioavailability of NM and MBC. The inhibition ability of the drugs also increased with the concentration of the CDM-AM copolymer ([Figure 6c](#F0006)). Compared to free NM and MBC, the NM and MBC complexes demonstrated a 1.92- and 1.73-fold increase in fungicidal activity at a concentration of 1.15 mmol/L (CDM-AM copolymer), respectively. The zone of inhibition gradually decreased over time; therefore the complexes had slow-release potential. Although the concentration of NM in the CDM-AM solution was higher than that of MBC, the inhibition zones showed no significant differences from each other, probably because the NM·CDM-AM complex was more stable than the MBC·CDM-AM complex.
Together, these results may provide useful information for the facile application of both NM and MBC. Figure 6. The fungicidal activity against A. niger of the inclusion complex NM·CDM-AM (a1--5) and the inclusion complex MBC·CDM-AM (b1--5). Compared with the fungicidal ability of the NM and MBC complexes of the CDM-AM copolymer synthesized by the chemical method \[[8](#CIT0008)\], the fungicidal abilities of the NM·CDM-AM and MBC·CDM-AM complexes prepared in this work were both lower. This is because the radiation can degrade the cyclodextrin rings of the CDM-AM copolymer, leaving fewer inclusion (absorption) sites in the copolymer synthesized by irradiation. 3.6. Residual amount of acrylamide {#S0003-S2006} ---------------------------------- Based on the HPLC spectra of the acrylamide homopolymer, acrylamide can form two kinds of homopolymer under γ-ray irradiation, with retention times of 1.711 min and 2.758 min ([Figure 7c](#F0007)). Hence, in the HPLC spectra of the CDM-AM copolymer, the absorption peak at a retention time of 2.752 min corresponds to the PAM homopolymer, and the absorption peak at a retention time of 1.925 min corresponds to the CDM-AM polymer (as shown in [Figure 7b](#F0007)). The HPLC spectra of the CDM-AM polymer therefore further confirm its synthesis. The residual amount of acrylamide at different irradiation doses was calculated from the standard equation shown in [Figure 7a](#F0007). With increasing irradiation dose, the residual amount of acrylamide gradually decreased ([Figure 7d](#F0007)). Figure 7. The HPLC spectra and standard curve of acrylamide (a); HPLC spectra of CDM-AM (b) and the acrylamide homopolymer (c); residual amount of acrylamide in the CDM-AM copolymer (d). 4. Conclusion {#S0004} ============== In this work, the CDM-AM copolymer was prepared from AM and CDM using γ-ray irradiation as the initiator. The preparation conditions for the CDM-AM copolymer were as follows: a CDM:AM mass ratio of 1:1; an irradiation dose of 4 kGy; and 20 mL of DMF-water solution. Under these synthetic conditions, the gravimetric yield of CDM-AM was 75%. The NM·CDM-AM and MBC·CDM-AM complexes were also prepared, with apparent stability constants at 303 K of 13,446.06 M^−1^ and 2595.30 M^−1^, respectively. The NM·CDM-AM and MBC·CDM-AM complexes demonstrated significantly improved water solubility and NM/MBC bioavailability, providing a promising approach for the more straightforward application of NM and MBC. As the irradiation dose increased, the residual amount of acrylamide gradually decreased; however, a high irradiation dose can also cause degradation of β-cyclodextrin, which would reduce the CDM-AM polymer's solubilization effect on hydrophobic drugs. Acknowledgments =============== This work was financially supported by the "Irradiation preservation and processing engineering technology research center of Guizhou agricultural products" (Grant No. 2016-5203) and the "Service enterprise plan of Guizhou scientific research institutions" (Grant No. 20165712). Disclosure statement {#S0005} ==================== No potential conflict of interest was reported by the authors.
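Section 3.6 above computes the residual acrylamide from the standard equation shown graphically in Figure 7a. As a rough illustration of that workflow (not the authors' actual procedure or data; the calibration points and sample peak areas below are hypothetical placeholders), a linear standard curve can be fitted and applied like this:

```python
# Minimal sketch of the standard-curve calculation from Section 3.6.
# Calibration concentrations/areas and sample peak areas are HYPOTHETICAL;
# the paper only reports the curve graphically (Figure 7a).
import numpy as np

# calibration standards: acrylamide concentration (mg/L) vs. HPLC peak area
conc = np.array([1.0, 2.0, 5.0, 10.0, 20.0])                 # placeholder
area = np.array([1.1e4, 2.2e4, 5.4e4, 1.08e5, 2.15e5])       # placeholder

slope, intercept = np.polyfit(area, conc, 1)                 # linear standard equation
print(f"standard equation: c = {slope:.3e} * A + {intercept:.3f}")

# apply to copolymer samples prepared at different irradiation doses
samples = {"2 kGy": 9.0e4, "4 kGy": 6.1e4, "8 kGy": 3.2e4}   # placeholder areas
for dose, peak_area in samples.items():
    residual = slope * peak_area + intercept
    print(f"{dose}: residual acrylamide ~ {residual:.2f} mg/L")
```

The decreasing placeholder values were chosen only to mirror the trend reported in Figure 7d (less residual acrylamide at higher irradiation dose).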
Let's be lazy, & Talk. Servers: IdleChat uses round-robin DNS to handle all incoming connections to the network. If you encounter a problem connecting to IdleChat, just try to reconnect after a few seconds. Some of the server names below do not really exist; you will not be able to connect to them directly, so you'll need to use the round-robin. Ports 6660-6669 are accessible for use, along with SSL port 6697.
A Cape Breton man who was found not criminally responsible for killing his wife is entitled to receive 100 per cent of her life insurance policy, a Nova Scotia Supreme Court judge has ruled. Richard Maidment, 42, who also uses the surname McNeil, killed Sarabeth Forbes on April 18, 2017, in the home they shared in Gardiner Mines, N.S. Maidment has schizophrenia and his mental health had been deteriorating dramatically in the days before the killing. Forbes and their son, then 10 years old, had moved out of the residence as a precaution the day before. But on the morning of April 18 she returned to the home, where she was killed. Maidment was charged with first-degree murder, but in December 2017 was found not criminally responsible and confined to the East Coast Forensic Hospital in Dartmouth, Nova Scotia's only secure psychiatric facility. In 2015, Forbes had purchased a life insurance policy for herself naming Maidment as the beneficiary. She named their son as an alternate beneficiary. Friends described Sarabeth Forbes as 'always smiling ... a bubbly personality, and would do anything for anybody.' (Submitted by Tanya Hennick-McNeil) Maidment's mother, Linda McNeil, claimed the insurance money on behalf of her son. Forbes's mother, Emeline Forbes, who is now raising the couple's son, applied for the insurance money on his behalf. Because there were competing claims, Co-operators Life Insurance Company paid the claim to the court and left it to a judge to decide. In a decision released Thursday, Justice Frank Edwards ruled the money should go to Maidment, not his son. The decision does not disclose the amount of the payout. "There is a public policy rule which says criminals should not be permitted to benefit from their crimes," Edwards wrote. "That public policy rule has no application to this case. Richard has been found to be not criminally responsible. He is not a criminal." Edwards is the same judge who found Maidment not criminally responsible for the killing, an event he describes as "an unspeakably horrendous and tragic event for everyone involved." Friends say money should be in trust Friends of Forbes said they were "sickened" to hear about the ruling and that it took them back to the day they learned of Forbes's death. "Just a gut-wrenching feeling in the pit of your stomach that this can't be real, this can't be happening. But now it's happened twice," said Valerie Youden, who worked with Forbes at Parkland, a senior's home in Sydney, N.S. Forbes also worked as a teacher's assistant at an elementary in nearby Reserve Mines. Co-workers of Sarabeth Forbes said they were 'sickened' to hear about the ruling. (Submitted by Tanya Hennick-McNeil) "I know he has a mental illness, but he still chose to brutally [kill] Sarabeth, and we all have choices in life, and he made that one," said Terri Spooney, another friend and co-worker, adding Forbes would be "devastated" if she knew he'd received the money instead of her son. Both women believe Forbes would have wanted the money to go to her son's future. 'Heart bigger than life itself' Youden considers the situation a failure of the justice system. "If you're not responsible for her death, he shouldn't be [considered] responsible enough to get the money," she said. She and Spooney said they try to focus on memories of Forbes instead of her death. They said there's rarely a day at work that either a client or co-worker doesn't refer to her fondly. 
"She was always smiling, always laughing, a bubbly personality, and would do anything for anybody. She had a heart bigger than life itself," said Spooney. MORE TOP STORIES
Thursday, September 18, 2008 From the Mailbag: Kaboose Kaboose (KAB) is an online advertiser and marketer geared towards families and children. Some of the websites they run include bounty.com, babyzone.com, amazingmoms.com, funschool.com, zeeks.com and bubbleshare.com. Kaboose generates revenues through online commerce and advertising. The company has been growing revenues primarily by making strategic acquisitions of websites geared towards their target market segment. First, let's review some of the Kaboose value indicators. The stock has a market cap of $101.5M based on the current market price of $0.73/sh. The relatively small market cap suggests that analyst coverage is likely low and could lead to pricing inefficiencies with the stock, as we've discussed here. The company is not yet profitable; however, with an equity value of $180M the price to book ratio is 0.53. Depending on the quality of the assets, the low P/B value could indicate value. As a value investor, I heed the warnings of Graham, Dreman and Athanassakos (and others) and do not attempt to forecast a company's earnings. Rather, I assess earnings strength by looking at the track record of what a company has already accomplished and make a judgment as to whether the results are repeatable or not (look out for value traps!). Kaboose is not at a stage where they can show consistent positive free cash flow, and for this reason I personally would never buy a company like Kaboose until they have demonstrated the capacity to do so. For that reason, let's review the quality of Kaboose's assets to see if value can be found there. Total assets for Kaboose are $249M. Reviewing the assets, I found that goodwill and intangibles are on the books at $107.5M and $103.5M, respectively. Goodwill is the excess value paid over the fair value of the assets acquired. Presumably, Kaboose is paying more than fair value for the acquired assets because they believe those assets will generate adequate returns in the future to justify the price paid. However, since Kaboose has yet to demonstrate profitable operations, I have deep reservations about recognizing goodwill at its book value. By the same line of reasoning, I have similar reservations about recognizing the full intangible value on the books. Even though the price to book value looked promising for Kaboose, the tangible asset value of the company is rather small compared to the total assets. Since the goodwill and intangibles represent $210.5M of the total asset value of $249M, if those "softer" assets are impaired significantly, an investor may be investing in a company with a very high price to book ratio. The extremes for price to book would then be 0.53 if you accept goodwill and intangibles at book value, ranging up to 2.6 if you write them off completely. I don't know what the real value of goodwill and intangibles should be, but without a reliable gauge on earnings strength I would tend towards being more conservative rather than optimistic in my judgment. To summarize, since Kaboose has not yet demonstrated the ability to consistently generate positive free cash flows and the asset value of the company is predominantly made up of goodwill and intangibles (84.5% of the total assets), I would consider putting money into the company's stock at this time to be speculative in nature, without adequate protection of capital.
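The price-to-book arithmetic above is easy to reproduce. The following sketch (Python; the inputs are the figures quoted in the post, and any small difference from the post's stated 0.53 presumably comes from rounding or share-count details) computes the two P/B extremes discussed:

```python
# Price-to-book extremes for Kaboose using the figures quoted in the post.
market_cap   = 101.5                     # $M, at $0.73/share
equity       = 180.0                     # $M book value of equity
total_assets = 249.0                     # $M
goodwill_and_intangibles = 107.5 + 103.5 # $M

pb_as_reported = market_cap / equity
tangible_assets = total_assets - goodwill_and_intangibles
pb_if_written_off = market_cap / tangible_assets   # price vs. tangible asset value

print(f"P/B accepting goodwill+intangibles at book: {pb_as_reported:.2f}")
print(f"Goodwill + intangibles as % of total assets: "
      f"{goodwill_and_intangibles / total_assets:.1%}")
print(f"Price / tangible assets if fully written off: {pb_if_written_off:.2f}")
```

The 84.5% share of "softer" assets and the upper extreme of roughly 2.6 match the figures given in the post.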
Of course, people that decide to speculate under such circumstances and subsequently do well may be left with an exciting sense of precognitive abilities that are bound to disappoint in the future.
[Variation in closeness to reality of standardized resuscitation scenarios : Effects on the success of cognitive learning of medical students]. Simulation often relies on a case-based learning approach and is used as a teaching tool for a variety of audiences. The knowledge transfer goes beyond the mere exchange of soft skills and practical abilities and also includes practical knowledge and decision-making behavior; however, verification of knowledge or practical skills seldom unfolds during simulations. Simulation-based learning seems to affect many learning domains and can, therefore, be considered to be multifactorial in nature. At present, studies examining the effects of learning environments with varying levels of reality on the cognitive long-term retention of students are lacking. The present study focused on the question whether case scenarios with varying levels of reality produce differences in the cognitive long-term retention of students, in particular with regard to the learning dimensions knowledge, understanding and transfer. The study was conducted on 153 students in the first clinical semester at the Justus-Liebig University of Giessen. Students were randomly selected and subsequently assigned, also in a random fashion, to two practice groups, i.e. realistic and unrealistic. In both groups the students were presented with standardized case scenarios consisting of three case studies, which were accurately defined with a case report containing a detailed description of each scenario and all relevant values so as to ensure identical conditions for both groups. The unrealistic group sat in an unfurnished practice room as a learning environment. The realistic group sat in a furnished learning environment with various background pictures and ambient noise. Students received examination questions before, immediately following and 14 days after the practice. Examination questions were identical at each of the three time points, classified into three learning dimensions following Bloom's taxonomy and evaluated. Furthermore, examination questions were supplemented by a questionnaire concerning the individual perception of reality and own learning success, to be filled in by students immediately after the practice. Examination questions and questionnaires were anonymous but associated with each other. Even with less experienced participants, realistic simulation design led to a significant increase of knowledge immediately after the end of the simulation. This effect, however, did not impact the cognitive long-term retention of students. While the realistic group showed a higher initial knowledge after the simulation, this "knowledge delta" was forgotten within 14 days, putting them back on par with the unrealistic comparison group. It could be significantly demonstrated that 2 weeks after the practice, comprehension questions were answered better than those on pure knowledge. Therefore, it can be concluded that even vaguely realistic simulation scenarios affect the learning dimension of understanding. For simulation-based learning the outcome depends not only on knowledge, practical skills and motivational variables but also on the onset of negative emotions, perception of own ability and personality profile. Simulation training alone does not appear to guarantee learning success but it seems to be necessary to establish a simulation setting suitable for the education level, needs and personality characteristics of the students.
This invention relates to a hand-operated remote control unit for controlling the welding current of a welding machine, and more particularly to such a device in which a single push button activates and controls the current of a welding machine with a universal mounting structure which permits the positioning of the welding torch in a variety of positions and permits the push button to be easily operated with different digits of the hand without affecting the welding operations being performed. A number of prior art devices have been disclosed which utilize remote control means for controlling the welding current of a welding torch without resorting to returning to the welding console for such an adjustment. Two of the more recent patents dealing with this problem, U.S. Pat. No. 3,968,341 and U.S. Pat. No. 4,051,344, include on-off switches and linearly actuated controls for varying the resistance of a potentiometer to vary the current supplied to a welding torch. Both devices are hand-held and thumb operated. The problem with this type of operation is that the thumb is better used for support than for control because the thumb has less sensitivity to touch than the other digits of the hand. Further, linear operation requires pressure on the handle, which tends to move the torch and change its position with respect to the workpiece, thereby producing a faulty weld. In U.S. Pat. No. 3,968,341, the control is actually mounted in the handle and accordingly the positioning of the welding torch is limited since the thumb must be positioned on the actuating control. In U.S. Pat. No. 4,051,344, the control unit is strapped to the welding torch, again restricting movement in all but a rotational sense.
Dentate Gyrus Immaturity in Schizophrenia. Hippocampal abnormalities have been heavily implicated in the pathophysiology of schizophrenia. The dentate gyrus of the hippocampus was shown to manifest an immature molecular profile in schizophrenia subjects, as well as in various animal models of the disorder. In this position paper, we advance a hypothesis that this immature molecular profile is accompanied by an identifiable immature morphology of the dentate gyrus granule cell layer. We adduce evidence for arrested maturation of the dentate gyrus in the human schizophrenia-affected brain, as well as multiple rodent models of the disease. Implications of this neurohistopathological signature for current theory regarding the development of schizophrenia are discussed.
Mechanical discoordination rather than dyssynchrony predicts reverse remodeling upon cardiac resynchronization. By current guidelines a considerable part of the patients selected for cardiac resynchronization therapy (CRT) do not respond to the therapy. We hypothesized that mechanical discoordination [opposite strain within the left ventricular (LV) wall] predicts reversal of LV remodeling upon CRT better than mechanical dyssynchrony. MRI tagging images were acquired in CRT candidates (n = 19) and in healthy control subjects (n = 9). Circumferential strain (epsilon(cc)) was determined in 160 regions. From epsilon(cc) signals we derived 1) an index of mechanical discoordination [internal stretch fraction (ISF), defined as the ratio of stretch to shortening during ejection] and 2) indexes of mechanical dyssynchrony: the 10-90% width of time to onset of shortening, time to peak shortening, and end-systolic strain. LV end-diastolic volume (LVEDV), end-systolic volume (LVESV), and ejection fraction (LVEF) were determined before and after 3 mo of CRT. Responders were defined as those patients in whom LVESV decreased by >15%. In responders (n = 10), CRT increased LVEF and decreased LVEDV and LVESV (11 +/- 6%, 21 +/- 16%, and 30 +/- 16%, respectively) significantly more (P < 0.05) than in nonresponders (1 +/- 6%, 3 +/- 4%, and 5 +/- 10%, respectively). Among mechanical indexes, only ISF was different between responders and nonresponders (0.53 +/- 0.25 vs. 0.31 +/- 0.16; P < 0.05). In patients with ISF >0.4 (n = 10), LVESV decreased by 31 +/- 18% vs. 5 +/- 11% in patients with ISF <0.4 (P < 0.05). We conclude that mechanical discoordination, as estimated from ISF, is a better predictor of reverse remodeling after CRT than differences in time to onset and time to peak shortening. Therefore, discoordination rather than dyssynchrony appears to reflect the reserve contractile capacity that can be recruited by CRT.
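Since the internal stretch fraction (ISF) is the abstract's key index, a compact sketch of how such a discoordination index could be computed from regional circumferential strain traces may be useful. This is an illustrative interpretation of the stated definition ("ratio of stretch to shortening during ejection"), not the authors' code; the synthetic strain traces below are placeholders, and the paper's exact definition may differ in detail.

```python
# Illustrative ISF-like index: total systolic stretch divided by total systolic
# shortening, summed over LV regions. Strain increments are SYNTHETIC
# placeholders (the study derived epsilon_cc in 160 regions from MRI tagging).
import numpy as np

rng = np.random.default_rng(0)
n_regions, n_frames = 160, 20                       # frames spanning ejection
# synthetic circumferential strain change (%) per frame during ejection
d_strain = rng.normal(loc=-0.8, scale=1.0, size=(n_regions, n_frames))

stretch    = np.clip(d_strain, 0, None).sum()       # positive increments = stretch
shortening = -np.clip(d_strain, None, 0).sum()      # negative increments = shortening

isf = stretch / shortening
print(f"ISF (synthetic data) = {isf:.2f}")
# For reference, the abstract reports ISF ~0.53 in responders, ~0.31 in
# nonresponders, with ISF > 0.4 predicting reverse remodeling.
```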
Q: How do I do bi-directional communications using "Codename One New-Java-Javascript-Interop-API" in my specific use-case? I have an app that makes intensive use of "browserComponent" and of bidirectional communication between Java native code and JavaScript embedded inside the web-page content. Now we must do the same thing using the new Async-Java-Javascript-Interop-API recently posted by Codename One. In this context, we need to bind all buttons present on each page to routines coded in native code (Java/Kotlin) as soon as the page is loaded. How can we capture the "HTML body on-load event", and how do we activate all the bindings that we need? And, in the opposite direction, the Java code that is triggered by the HTML buttons mentioned before interacts with the users. As a result of these interactions, it is normally necessary to modify the values of some JavaScript variables (frequently a JavaScript array) and in that way dynamically update the document model in the browser. Any help on this subject will be much appreciated. A: You can bind an onLoad event in the same way you always could, using: browserComponent.addWebEventListener(BrowserComponent.onLoad, e -> { // here you can start writing JS binding code }); For reference, this article covers everything else about the binding: https://www.codenameone.com/blog/new-async-java-javascript-interop-api.html
Q: Form Shows and hides again: jQuery I am trying to show a form when a button is clicked. But it shows for a second or so and then hides again. i have tried to debug it with console.log() but my jQuery is not reaching in the anonymous function in click event in chrome console it is showing me this error "Failed to load resource: the server responded with a status of 400 (Bad Request)" Here is my code <asp:Panel id="dynamicSubForm" class="container" runat="server" DefaultButton="btnAdd" DefaultFocus="txtSegment"> <div class="subForm"> <div id="subHeader" class="divRow"> <div class="headCol"> <asp:Label ID="Label1" runat="server" Text="Sources"></asp:Label> </div> </div> <div class="subFormRow"> <div>&nbsp</div> </div> <div class="subFormRow"> <div class="subFormFstCol"> <asp:Label ID="lblSource" runat="server" Text="Source:"></asp:Label> </div> <div class="subFormScndCol"> <asp:DropDownList ID="ddlSource" runat="server" CssClass="whiteCtrl" DataSourceID="dsIrrigationSource" DataTextField="title" DataValueField="id"> </asp:DropDownList> <asp:SqlDataSource ID="dsIrrigationSource" runat="server" ConnectionString="<%$ ConnectionStrings:MIS4BOSConnectionString %>" SelectCommand="SELECT [id], [title] FROM [IrrigationSource]"> </asp:SqlDataSource> </div> </div> <div class="subFormRow"> <div class="subFormFstCol"> <asp:Label ID="lblSegment" runat="server" Text="Area Segment:"></asp:Label> </div> <div class="subFormScndCol"> <asp:TextBox ID="txtSegment" runat="server" CssClass="whiteCtrl"></asp:TextBox> %<br /> <asp:RegularExpressionValidator ID="revArea0" runat="server" ControlToValidate="txtSegment" Display="Dynamic" ErrorMessage="Percentage must be all digits." ValidationExpression="^[0-9]+$"></asp:RegularExpressionValidator> </div> </div> <div class="subFormRow"> <div class="subFormFstCol"> <asp:Label ID="lblDesc" runat="server" Text="Description:"></asp:Label> </div> <div class="subFormScndCol"> <asp:TextBox ID="txtDesc" runat="server" Height="70px" TextMode="MultiLine" CssClass="whiteCtrl"></asp:TextBox> </div> </div> <div class="subFormRow"> <div class="subFormFstCol"> </div> <div class="subFormScndCol"> <asp:Button ID="btnAdd" runat="server" CssClass="blueBtn" Text="Add" onclick="btnAdd_Click" /> <asp:Button ID="btnAddCancel" runat="server" CssClass="whiteBtn" Text="Cancel" onclick="btnAddCancel_Click" /> <br /> <asp:Label ID="lblSubMsg" runat="server" CssClass="usrMsg" Text="Label" Visible="False"></asp:Label> </div> </div> </div> </asp:Panel> enter code here <script src="http://ajax.googleapis.com/ajax/libs/jquery/1.10.2/jquery.min.js"> </script> <script> $(document).ready(function () { var subForm = $("#MainContent_dynamicSubForm"); var addBtn = $("#irrAdd"); console.log("before hide"); subForm.hide(); console.log("after hide"); addBtn.on("click", function () { console.log('in anonymous func'); subForm.show(); }); }); </script> A: The form is probably submitting, do it like this addBtn.on("click", function (e) { e.preventDefault(); console.log('in anonymous func'); subForm.show(); }); Note that the id irrAdd does not seem to exist in the markup, and that you could also change the buttons type to button
Differential distributions of peptides in the epidermal growth factor family and phosphorylation of ErbB 1 receptor in adult rat brain. Using two-site enzyme immunoassays, we measured protein levels of epidermal growth factor (EGF), transforming growth factor alpha (TGF alpha), and heparin-binding epidermal growth factor (HB-EGF) in adult rat brain, and compared them with the phosphorylation levels of their receptor (ErbB 1). There were significant variations in the brain distributions of each ErbB 1 ligand. Among these ErbB 1 ligands, HB-EGF protein levels were higher than those of TGF alpha and those of EGF were the lowest. TGF alpha protein was relatively enriched in the midbrain regions, while HB-EGF levels were most abundant in the cerebellum. Protein distributions of the EGF family members were discordant with previously reported mRNA distributions. In addition, there was significant basal ErbB 1 phosphorylation detected with the largest amount of activation in the midbrain. These observations suggest that the activation of brain ErbB 1 involves post-translational regulation of multiple EGF family members in a region-specific manner.
<?xml version="1.0" encoding="utf-8"?> <ProjectItem Type="Microsoft.VisualStudio.SharePoint.Module" DefaultFile="Elements.xml" SupportedTrustLevels="All" SupportedDeploymentScopes="Web, Site" xmlns="http://schemas.microsoft.com/VisualStudio/2010/SharePointTools/SharePointProjectItemModel"> <Files> <ProjectItemFile Source="Elements.xml" Type="ElementManifest" /> </Files> </ProjectItem>
include $(top_srcdir)/glib.mk noinst_LTLIBRARIES += libgiowin32.la libgiowin32_la_SOURCES = \ gwin32directorymonitor.c \ gwin32directorymonitor.h \ gwinhttpvfs.c \ gwinhttpvfs.h \ gwinhttpfile.c \ gwinhttpfile.h \ gwinhttpfileinputstream.c \ gwinhttpfileinputstream.h \ gwinhttpfileoutputstream.c \ gwinhttpfileoutputstream.h \ winhttp.h \ $(NULL) libgiowin32_la_CFLAGS = \ $(GLIB_HIDDEN_VISIBILITY_CFLAGS) \ -DG_LOG_DOMAIN=\"GLib-GIO\" \ $(gio_INCLUDES) \ $(GLIB_DEBUG_FLAGS) \ -DGIO_MODULE_DIR=\"$(GIO_MODULE_DIR)\" \ -DGIO_COMPILATION \ -DG_DISABLE_DEPRECATED
Q: Difference between Angular Translate's useSanitizeValueStrategy('sanitize') vs. useSanitizeValueStrategy('sanitizeParameters') I am using angular-translate for localization and I'm a little unclear on the differences between the 'sanitize' and 'sanitizeParameters' strategies for escaping. I've already checked out the docs but I still don't understand what "sanitize HTML in the translation text" means vs "sanitizes HTML in the values". Is one less secure than the other? If it is only sanitizing the HTML in the values of the interpolation parameter, is it not thoroughly sanitizing the content? The only difference I can see is that 'sanitizeParameters' will escape HTML when using a filter, while 'sanitize' does not. I can't use the 'sanitize' strategy because of the utf8 issue addressed here, so I want to make sure using 'sanitizeParameters' is a secure strategy. A: I took a look directly at the official source file 'angular-translate.js' and determined the following: sanitize - sanitizes the complete translation, including all parameters; sanitizeParameters - sanitizes only the parameters. The same logic applies to escape and escapeParameters. Note: strategies can be combined, i.e. $translateProvider.useSanitizeValueStrategy(['sanitize', 'escapeParameters']);
Hepatitis C virus RNA profiles in chronically infected individuals: do they relate to disease activity? Fluctuations of hepatitis C virus (HCV)-RNA serum levels were monitored in a multicenter study in 76 chronic HCV carriers who had been followed longitudinally without receiving antiviral therapy to assess their relation with the course of liver disease activity. Forty-four patients had normal transaminases over more than 2 years, while 32 additional patients had fluctuating levels. Viral load was measured in serial serum samples prospectively collected for 10 to 12 months in 54 patients and in sera stored yearly up to 8 years in an additional 22 patients. In patients tested monthly, a lesser extent of fluctuations was detected in cases with constantly normal transaminases as compared with those with fluctuating transaminases. In the former group, the mean difference between maximum and minimum values observed in each individual patient was 0.7 Log, while in the latter group, it was 1.3 Log (P =.0004). Most of these patients experienced, on average, three peaks of viremia over 1 year. The range of variation observed upon yearly testing was between 0.2 and 2.2 Log and did not reach statistical significance between the two groups. In conclusion, a careful viral replication profile can be achieved only by monthly testing, because longer time intervals could miss viremia fluctuations. HCV-RNA levels are more stable in asymptomatic HCV carriers than in patients with biochemical activity of liver disease.
Unreal Engine 4 "probably around 2014" The next version of video game engine Unreal will probably launch in 2014, Epic has said. Epic Games founder Tim Sweeney told IGN he spends 60 per cent of his time researching Epic's next generation engine: Unreal Engine 4, and the next generation of consoles. "This is technology that won't see the light of day until probably around 2014," he said, "but focusing on that horizon enables me to do some really cool things that just aren't practical today, but soon will be. "Some of our most productive work in the industry was on the first Unreal engine back in 1996, when I wrote a software renderer with a bunch of new features that hadn't been seen before. "I feel like that's what I'm doing now on Unreal Engine 4 in exploring areas of the technology nobody else is really yet contemplating because they're still a few years away from practicality. But I see a huge amount of potential there and so it's very, very fun work." A 2014 release for Unreal Engine 4 suggests a 2014 release for the next generation of consoles, given it is unlikely Epic would release it for use on PlayStation 3 and Xbox 360 games. In July CryEngine maker Crytek told Eurogamer video game graphics achieved using the DirectX 11 standard provide a solid indication of the visual power of the next Xbox and PlayStation. And earlier this year Epic released the Samaritan tech demo - a real-time video designed to provide a glimpse into what the next generation of consoles will be capable of.
<Type Name="NSControlText" FullName="MonoMac.AppKit.NSControlText"> <TypeSignature Language="C#" Value="public delegate bool NSControlText(NSControl control, NSText fieldEditor);" /> <TypeSignature Language="ILAsm" Value=".class public auto ansi sealed NSControlText extends System.MulticastDelegate" /> <AssemblyInfo> <AssemblyName>MonoMac</AssemblyName> <AssemblyVersion>0.0.0.0</AssemblyVersion> </AssemblyInfo> <Base> <BaseTypeName>System.Delegate</BaseTypeName> </Base> <Parameters> <Parameter Name="control" Type="MonoMac.AppKit.NSControl" /> <Parameter Name="fieldEditor" Type="MonoMac.AppKit.NSText" /> </Parameters> <ReturnValue> <ReturnType>System.Boolean</ReturnType> </ReturnValue> <Docs> <param name="control">To be added.</param> <param name="fieldEditor">To be added.</param> <summary>To be added.</summary> <returns>To be added.</returns> <remarks>To be added.</remarks> </Docs> </Type>
Q: Cannot telnet to host from an EC2 instance behind a proxy Let me try to rephrase the whole question, as it was not very clear the first time. I need to understand and work out port forwarding. I have an EC2 instance running behind a proxy server. I need to telnet from my EC2 instance to a server outside, on the internet, through port (let's say) 1919. In the security groups I have allowed TCP traffic on port 1919 from my EC2 instance to the proxy and back, and from the proxy to the remote server. To be able to telnet from my EC2 instance to the remote server, I think I will have to forward port 1919 on the proxy to be able to get to the remote server. I tried it with iptables -A PREROUTING -t nat -i eth0 -p tcp --dport 9093 -j DNAT --to *ipaddofEC2*:1919 iptables -A FORWARD -p tcp -d --dport 1919 -j ACCEPT In my squid I have: acl servicebus_port_9093 port 1919 and http_access allow allowed_source_hosts allowed_messaging_sites servicebus_port_9093 and acl allowed_messaging_sites dstdomain .servicebus.windows.net (the remote host's URL finishes with windows.net) Still I have not been able to telnet from my EC2 instance to the remote server. P.S. IP forwarding is on on the proxy, and I tried to run tcpdump on the proxy while trying to telnet from my EC2 instance to the remote server, but I get nothing (I don't know much about tcpdump). I get a timeout error. A: The protocol used was not TCP but a 'special' protocol; that's why I was not able to tunnel the traffic through.
Q: How to convert a text in xml to a hyperlink in html using xslt How to convert a text in xml to a hyperlink in html using xslt. my Xml code is <Steps> <Filepath>C:\Test\Capture\050615165617TC001_05_06_1516_57_11.png</Filepath> </Steps> to convert it into html my xslt code looks like <td width='15%'> <xsl:element name="a"> <xsl:attribute name="href"> <xsl:value-of select="./Filepath"/> </xsl:attribute> <xsl:value-of select="./Filepath"/> </xsl:element> </td> Now this code writes the entire path of the file in html, but I want to write only "File" in the html with the hyperlink to the location of the file. My current generated html code is given below C:\Test\Capture\050615165617TC001_05_06_1516_57_11.png <td width="15%"><a href="C:\Test\Capture\050615165617TC001_05_06_1516_57_11.png">C:\Test\Capture\050615165617TC001_05_06_1516_57_11.png</a></td> What I want is <td width="15%"><a href="C:\Test\Capture\050615165617TC001_05_06_1516_57_11.png">File</a></td> Can anyone help me what change I need to do in xslt. A: You are telling it to have the value: <xsl:element name="a"> <xsl:attribute name="href"> <xsl:value-of select="./Filepath"/> </xsl:attribute> <xsl:value-of select="./Filepath"/> <!--This is the link text --> </xsl:element> So change it to: <xsl:element name="a"> <xsl:attribute name="href"> <xsl:value-of select="./Filepath"/> </xsl:attribute> File </xsl:element>
The relationship of peak inspiratory airflow to subjective airflow in the nose. A prospective study of 145 patients presenting for nasal surgery was designed to examine the relationship of peak nasal inspiratory airflow (PNIF) to subjective nasal airway patency (SNA). Patients admitted for surgery filled in a self-assessment Questionnaire and PNIF measurements were taken. Six weeks post surgery the procedure was repeated. The patients were divided into four operative groups and the results analysed accordingly. Improvement in SNA was noted in 80-96% of patients and PNIF improved in 60-83%. However, PNIF decreased in 17-40% of patients who felt they were improved. Statistical analysis using Spearman's Rank Correlation test showed no correlation between SNA and PNIF. Despite the well recognized problems of assessing subjective sensation of nasal airflow, we feel this study casts doubt on the usefulness of PNIF in routine clinical practice.
/* --------------------------------------------------------------------- * * Copyright (C) 2018 - 2020 by the deal.II authors * * This file is part of the deal.II library. * * The deal.II library is free software; you can use it, redistribute * it, and/or modify it under the terms of the GNU Lesser General * Public License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * The full text of the license can be found in the file LICENSE.md at * the top level directory of deal.II. * * --------------------------------------------------------------------- */ // Document a hang in Triangulation::prepare_coarsening_and_refinement() when // periodic boundaries and mesh smoothing is used. // The (now fixed bug) was caused by eliminate_refined_boundary_islands also // incorrectly acting on periodic boundaries. This test originally comes from // ASPECT. #include <deal.II/base/conditional_ostream.h> #include <deal.II/distributed/tria.h> #include <deal.II/dofs/dof_tools.h> #include <deal.II/fe/fe_q.h> #include <deal.II/fe/fe_system.h> #include <deal.II/fe/mapping_q1.h> #include <deal.II/grid/grid_generator.h> #include <deal.II/grid/grid_tools.h> #include <deal.II/numerics/data_out.h> #include "../tests.h" template <int dim> void test() { MPI_Comm mpi_communicator = MPI_COMM_WORLD; const unsigned int n_mpi_processes = Utilities::MPI::n_mpi_processes(mpi_communicator); const unsigned int this_mpi_process = Utilities::MPI::this_mpi_process(mpi_communicator); const double L = 20; dealii::parallel::distributed::Triangulation<dim> triangulation( mpi_communicator, typename Triangulation<dim>::MeshSmoothing( Triangulation<dim>::limit_level_difference_at_vertices | Triangulation<dim>::eliminate_unrefined_islands | Triangulation<dim>::eliminate_refined_inner_islands | Triangulation<dim>::eliminate_refined_boundary_islands | Triangulation<dim>::do_not_produce_unrefined_islands)); GridGenerator::hyper_cube(triangulation, 0, 1, /*colorize*/ true); std::vector< GridTools::PeriodicFacePair<typename Triangulation<dim>::cell_iterator>> periodicity_vector; if (true) for (int d = 0; d < dim; ++d) GridTools::collect_periodic_faces(triangulation, /*b_id1*/ 2 * d, /*b_id2*/ 2 * d + 1, /*direction*/ d, periodicity_vector); triangulation.add_periodicity(periodicity_vector); // refine mesh triangulation.refine_global(3); // mark all cells except these for coarsening: std::vector<Point<dim>> points = {{312500, 93750}, {312500, 156250}, {62500, 343750}, {312500, 281250}, {312500, 343750}, {62500, 406250}, {62500, 468750}}; // rescale to [0,1]^2 for this to work: for (auto &p : points) { p[0] /= 1e6; p[1] /= 5e5; } for (auto &cell : triangulation.active_cell_iterators()) { cell->set_coarsen_flag(); for (auto &p : points) if (cell->point_inside(p)) { cell->clear_coarsen_flag(); cell->set_refine_flag(); } } triangulation.execute_coarsening_and_refinement(); deallog << "number of elements: " << triangulation.n_global_active_cells() << std::endl; // create dof_handler FESystem<dim> FE(FE_Q<dim>(QGaussLobatto<1>(2)), 1); DoFHandler<dim> dof_handler(triangulation); dof_handler.distribute_dofs(FE); // write mesh for visualization DataOut<dim> data_out; data_out.attach_dof_handler(dof_handler); Vector<float> subdomain(triangulation.n_active_cells()); for (unsigned int i = 0; i < subdomain.size(); ++i) subdomain(i) = triangulation.locally_owned_subdomain(); data_out.add_data_vector(subdomain, "subdomain"); data_out.build_patches(); 
data_out.write_vtu_in_parallel(std::string("mesh.vtu").c_str(), mpi_communicator); IndexSet locally_relevant_dofs; DoFTools::extract_locally_relevant_dofs(dof_handler, locally_relevant_dofs); IndexSet locally_active_dofs; DoFTools::extract_locally_active_dofs(dof_handler, locally_active_dofs); const std::vector<IndexSet> locally_owned_dofs = Utilities::MPI::all_gather(MPI_COMM_WORLD, dof_handler.locally_owned_dofs()); std::map<types::global_dof_index, Point<dim>> supportPoints; DoFTools::map_dofs_to_support_points(MappingQ1<dim>(), dof_handler, supportPoints); /// creating combined hanging node and periodic constraint matrix AffineConstraints<double> constraints; constraints.clear(); constraints.reinit(locally_relevant_dofs); for (int d = 0; d < dim; ++d) DoFTools::make_periodicity_constraints( dof_handler, 2 * d, 2 * d + 1, d, constraints); const bool consistent = constraints.is_consistent_in_parallel(locally_owned_dofs, locally_active_dofs, mpi_communicator, /*verbose*/ true); deallog << "Periodicity constraints are consistent in parallel: " << consistent << std::endl; DoFTools::make_hanging_node_constraints(dof_handler, constraints); const bool hanging_consistent = constraints.is_consistent_in_parallel(locally_owned_dofs, locally_active_dofs, mpi_communicator); deallog << "Hanging nodes constraints are consistent in parallel: " << hanging_consistent << std::endl; constraints.close(); deallog << "OK" << std::endl; } int main(int argc, char *argv[]) { Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv); MPILogInitAll log; test<2>(); }
As Christmas Day approaches, I am readying myself for 'Ghar Wapsi'. Normally it would be celebration time, but there has been a dramatic shift in interest and emphasis, what with the mass re-conversions planned on that day at Aligarh and the observance of Good Governance Day in schools across the country! A BJP vice president (for the state of Maharashtra) stays in my building and I meet him quite often in the elevator or in the building compound during my morning walk. When I told him that I was ready for 'Ghar Wapsi' he was visibly excited. I asked him what caste I would be assigned to on reconversion, but he said he would have to find out. After a brief pause he wanted to know the caste to which my Hindu ancestors belonged. I mentioned that they were Saraswats, so he opined that I would revert to that caste. In reply, I asked him how he could be so sure since my forebears had eaten all sorts of prohibited meat. That stumped him and he reverted to his stance of the need to consult someone about the same. It is a historical fact that Hindus of different castes were converted by Portuguese missionaries 400 to 500 years ago at a time when faith dominated political discourse. Some sort of coercion must necessarily have been used. The Ottoman Turks had done the same in the Balkans despite resistance from the Austro Hungarians and Czarist Russia. The detritus of those conversions still linger in the surcharged atmosphere of embattled Bosnia and neighbouring Kosovo. Religion has always been used as a tool by State players from the dawn of history. It has led to numerous religious wars, the most prominent of which were the Crusades which pitted the Muslims against the Christians in Jerusalem and the Middle East. Everyone forgets that religions did not exist when man first appeared on Earth. Different theories were put forth by different people in different corners of the world to explain the mystery of life and death, of the wonders of nature and events that could not be explained by human intellect. Different theories were soon turned into beliefs and ultimately into religions. Fortunately, all religions speak of the necessity of being just and good and condemn that which militates against peace and order in society. Five centuries ago, in the Age of Faith, Christian missionaries just like Arab traders and Ottoman soldiers went around the world propagating a one true religion. That epoch has long gone but diehards abound and they in turn have given birth to extremists in more moderate religions like Hinduism. Mass conversions disturb social order and need to be discouraged. Individuals seeking spiritual solace in another religion different from the one in which they were born is common all over the world. In Portugal I came across descendents of those who converted my forefathers who had now adopted Krishna as their preferred god! There can be no justification for killing innocent people in the name of God and no plausible reasons for hating individuals who hold beliefs about God that are different from theirs. Unfortunately, we are now confronted with 'Ghar Wapsi' which is just an excuse for divisive politics and is not going to help the country to advance. I have no quarrel with the concept of Hindutva or cultural nationalism as the RSS describes it. As I have said earlier, I admit that my ancestors were Hindus and incidentally I have never failed to proclaim it. 
Probably because my forefathers belonged to the higher varna of Hindu society as compared to many other compatriots who had been liberated from an inferior status ascribed to them in the Hindu social order. Here's a historical tidbit which most Goan Christians, including priests, do not know; it will be of interest to them as well as to our Hindu brothers in Goa. The first Goan Catholic priest to be ordained bishop (of Bijapur) was Mateus de Castro of Chorao in Diwad. He insisted on being ordained as Mateus de Castro Mahale because that was the Hindu surname of his family prior to conversion. The priest who baptized his forefathers was a Castro, and, like all Goans (even East Indians from Mumbai and Vasai) converted en masse, they were given the surname of the priest who baptized them. Portuguese surnames are to be found all along the coast of India, and even the Sri Lankan test cricketers Mendis, Pereira, Vas and De Silva are Sinhalese whose ancestors were converted by the Portuguese. I have also come across a Ribeiro in Quilon in Kerala who spoke only Malayalam. So, as I await December 25, the day ear-marked for 'Ghar Wapsi', I am trying to figure out what my old Hindu surname was. I will have to go to Goa, to my taluka of Bardez, to my village of Socorro, and diligently dig out old Church records. But the Sangh Parivar is giving me very little time to do that. They are obviously in a mighty hurry to reconvert 23 million Indians so that they become first-class citizens like them! But will they really be accepted after reconversion? That is certainly difficult to predict. (The author is former commissioner of police, Mumbai and ex-DGP, Punjab)
Q: What do these deprecated markings mean? I found this Moog Prodigy synthesizer schematic that I'm trying to figure out how it works in order to learn more about how electronics work. What do the circled symbol at the top of the schematic mean ? What does "N.C" mean ? An example of it is located at the far right. What does the "*" next to R2 mean ? What does the "1%" next to R5 mean ? What does -9V and -12V mean ? What does the arrow next to R14 mean ? A: The item in the top left is a 2-pole switch that has 3 positions (32', 16', 8'). In the top right, NC means no connection. The 1% is the tolerance of resistor R5. R14 is a potentiometer (variable resistor). The arrow is the wiper arm of R14. The -12V is the voltage powering that part of the circuit. The voltage (-9.0V) at the output of U2B is either for purposes of troubleshooting or for an adjustment. The meaning of the star above R2 is probably explained in a note somewhere else on the schematic.
MP support boosts RCN nurse prescribing lobby. The Government is expected to announce plans for nurse prescribing legislation later this year.
1. Field of the Invention This invention relates to an anode coated with .beta.-lead dioxide which has dimensional stability and excellent adhesion between the .beta.-lead dioxide coating layer and a titanium substrate. More particularly, this invention relates to an anode coated with .beta.-lead dioxide which comprises a titanium substrate on at least one side of which both titanium expanded metal and titanium reinforcing bars are welded, at least one medium layer (i.e., intermediate layer) being applied on said substrate, said intermediate layer consisting either of an alloy or an oxide of platinum group metals, and a .beta.-lead dioxide coating layer being applied on the said intermediate layer. 2. Description of the Prior Art An anode for use in electrolytic industries should have not only excellent anodic characteristics and corrosion resistance, but also excellent workability, mechanical strength and dimensional stability. Recently, the use of coated metallic anodes in the chlor-alkali industry has been proposed. Such coated metallic anodes have excellent anodic characteristics, corrosion resistance, and dimensional stability and have brought about a number of remarkable improvements in the chlor-alkali industry. However, these anodes have disadvantages in that they are expensive and have a relatively short life when they are used under oxygen generation. On the other hand, the .beta.-lead dioxide anode has been known from old times, and has been recognized to have high oxygen overvoltage characteristics, high corrosion resistance, peculiar catalytic properties and economic advantages resulting from its low manufacturing cost. However, the .beta.-lead dioxide anode has not been adopted very widely in the electrolytic industry in spite of its excellent characteristics. This is due mainly to its lack of mechanical strength, dimensional stability, and workability. Planar .beta.-lead dioxide anodes have conventionally been manufactured by anodically electrodepositing .beta.-lead dioxide on a concave surface of a substrate from a lead nitrate bath, and then detaching the .beta.-lead dioxide layer from the substrate. Such anodes are not free from strong internal stresses and hence are very brittle, thus limiting the size and shape that can be manufactured and substantially reducing their practical value in industry. Attempts have been made to electrodeposit .beta.-lead dioxide on graphite or on flash-coated platinum coated on a titanium substrate (see U.S. Pat. No. 3,207,679). This was not very successful, however, because .beta.-lead dioxide is apt to detach from the substrate because of low adhesion and the growth of cracks, and such anodes were too heavy for easy handling. More recently, the manufacture of a .beta.-lead dioxide anode by electrodepositing .beta.-lead dioxide on a screen was tried (see U.S. Pat. No. 2,872,405). The adhesion of the coating layer to the anode was better, but it was difficult to obtain even plates. Thus, the surface was uneven, the dimensional stability was insufficient, and it was impossible to obtain even anodes of the substantial dimensions required in industry. In addition, difficulty was encountered in applying a current connection to the anode. Accordingly, an anode coated with .beta.-lead dioxide is not satisfactory for extended use in the electrolytic industries, mainly because of the lack of mechanical strength and dimensional stability and the difficulty of manufacturing large anodes.
Q: Why are atropisomers called conformers? Changing one atropisomer to another requires bond breaking (in some cases the removal and reattachment of steric groups, according to my understanding), so how is it possible that, according to IUPAC, atropisomers are classified as conformers (conformational isomers)? According to IUPAC: Atropisomers - A subclass of conformers which can be isolated as separate chemical species and which arise from restricted rotation about a single bond. Conformers - One of a set of stereoisomers, each of which is characterized by a conformation corresponding to a distinct potential energy minimum. Conformation - The spatial arrangement of the atoms affording distinction between stereoisomers which can be interconverted by rotations about formally single bonds. Some authorities extend the term to include inversion at trigonal pyramidal centres and other polytopal rearrangements. This definition of "conformation" seems vague to me, but it is often said that changing one conformation to another involves no bond-breaking and changing one configuration to another requires bond-breaking. This is the difference between them. IUPAC gives examples of atropisomerism: e.g. (E)-cyclooctene (given in the Gold Book "planar chirality" entry). This definitely should involve bond-breaking. Other IUPAC examples of atropisomerism include substituted ortho-biphenyls. A: I think your premise that "Changing one atropisomer to another requires bond breaking (in some cases the removal and reattachment of steric groups, according to my understanding)" is not correct. To my knowledge, interconverting a pair of atropisomers only requires rotation about the hindered single bond; the rotation is not blocked by any connecting bond, so no bond needs to be broken. I think you have been confused by some literature describing the selective cleavage of a bond to give one atropisomer of a pair (e.g., Ref. 1): According to your post, the IUPAC definition of atropisomers is: Atropisomers - A subclass of conformers which can be isolated as separate chemical species and which arise from restricted rotation about a single bond. A variety of sources confirm this definition. For instance, Refs. 2 and 3 describe atropisomers as: Atropisomers are stereoisomers (rotamers) resulting from hindered rotation about single bonds (having an energy barrier to rotation about a single $\sigma$ bond), usually due to steric hindrance. This energy barrier to rotation is high enough to allow the isolation of the conformers (rotamers). Moreover, the name, coined by the German biochemist Richard Kuhn for a then-theoretical concept in 1933 (Wikipedia), is derived from Greek (a = not and tropos = turn; thus atropos, meaning "without turn"), which supports this definition. However, atropisomerism was first detected in 6,6'-dinitro-2,2'-diphenic acid by Christie et al. in 1922 (Ref. 4): It is also important to know the nomenclature of atropisomers (Wikipedia): Determining the axial stereochemistry of biaryl atropisomers can be accomplished through the use of a Newman projection along the axis of hindered rotation. The ortho, and in some cases meta, substituents are first assigned priority based on Cahn–Ingold–Prelog priority rules (Ref. 5 & Ref. 6). One scheme of nomenclature is based on envisioning the helicity defined by these groups (Ref. 7). Starting with the substituent of highest priority in the closest ring and moving along the shortest path to the substituent of highest priority in the other ring, the absolute configuration is assigned $P$ or $\Delta$ for clockwise and $M$ or $\Lambda$ for counterclockwise.
Alternatively, all four groups can be ranked by Cahn–Ingold–Prelog priority rules, with overall priority given to the groups on the "front" atom of the Newman projection. The two configurations are termed $R_\mathrm{a}$ and $S_\mathrm{a}$ in analogy to the traditional $R/S$ for a tetrahedral stereocenter (Ref. 8): Atropisomers are also found in nature. The main pigment of cotton seeds, gossypol, is an atropisomer, which exists in both the $(R_\mathrm{a})$- and $(S_\mathrm{a})$-conformations. Most commercial Upland (Gossypium hirsutum) cottonseeds have an $(R_\mathrm{a})$- to $(S_\mathrm{a})$-gossypol ratio of ~$2:3$, but some Pima (Gossypium barbadense) seeds have an excess of $(R_\mathrm{a})$-gossypol. Of the two isomers, $(R_\mathrm{a})$-gossypol is more toxic and exhibits significantly greater anticancer activity than its $(S_\mathrm{a})$-atropisomer: Two fun facts about atropisomers (KU.edu): atropisomers are detectable by $\mathrm{NMR}$ if half-lives exceed $\pu{10^{-2} s}$; atropisomers are isolatable if the half-life is above $\pu{10^3 s}$ (a small numerical conversion of these thresholds is sketched after the reference list below). References: Gerhard Bringmann, Thomas Hartung, "Atropo-enantioselective biaryl synthesis by stereocontrolled cleavage of configuratively labile lactone-bridged precursors using chiral H-nucleophiles," Tetrahedron 1993, 49(36), 7891-7902 (https://doi.org/10.1016/S0040-4020(01)88014-5). Alan R. Katritzky, Christopher A. Ramsden, John A. Joule, Viktor V. Zhdankin, in Handbook of Heterocyclic Chemistry, Third Edition; Elsevier Limited: Amsterdam, The Netherlands, 2010 (ISBN: 978-0-08-095843-9). Jonathan Clayden, "Atropisomerism," Tetrahedron 2004, 60(20), 4335 (https://doi.org/10.1016/j.tet.2004.03.002). George Hallatt Christie, James Kenner, "LXXI.—The molecular configurations of polynuclear aromatic compounds. Part I. The resolution of $\gamma$-6 : 6′-dinitro- and 4 : 6 : 4′ : 6′-tetranitro-diphenic acids into optically active components," Journal of the Chemical Society, Transactions 1922, 121, 614–620 (https://doi.org/10.1039/CT9222100614). R. S. Cahn, Christopher Ingold, V. Prelog, "Specification of Molecular Chirality," Angew. Chem. Internat. Ed. Engl. 1966, 5(4), 385-415 (https://doi.org/10.1002/anie.196603851) and Corrigendum: Angew. Chem. Internat. Ed. Engl. 1966, 5(5), 511-511 (https://doi.org/10.1002/anie.196605111). G. P. Moss, "Basic terminology of stereochemistry: IUPAC Recommendations 1996," Pure and Applied Chemistry 1996, 68(12), 2193-2222 (PDF). http://goldbook.iupac.org/terms/view/H02763: IUPAC. Compendium of Chemical Terminology, 2nd ed. (the "Gold Book"). Compiled by A. D. McNaught and A. Wilkinson. Blackwell Scientific Publications, Oxford (1997). Online version (2019-) created by S. J. Chalk. ISBN 0-9678550-9-8. https://doi.org/10.1351/goldbook. http://goldbook.iupac.org/terms/view/A00547: IUPAC. Compendium of Chemical Terminology, 2nd ed. (the "Gold Book"). Compiled by A. D. McNaught and A. Wilkinson. Blackwell Scientific Publications, Oxford (1997). Online version (2019-) created by S. J. Chalk. ISBN 0-9678550-9-8. https://doi.org/10.1351/goldbook.
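As a small numerical footnote to the "fun facts" above (my own illustration, using the standard first-order relation k = ln 2 / t½, which the answer itself does not spell out), the quoted half-life thresholds translate into rotational rate constants roughly as follows:

```python
# Convert the quoted atropisomer half-life thresholds into first-order
# rotation rate constants via k = ln(2) / t_half (standard first-order
# kinetics; this conversion is an addition, not part of the original answer).
import math

thresholds_s = {
    "NMR-detectable (t_half > 1e-2 s)": 1e-2,
    "isolatable (t_half > 1e3 s)":      1e3,
}
for label, t_half in thresholds_s.items():
    k = math.log(2) / t_half          # s^-1; longer half-life => slower rotation
    print(f"{label}: rotation rate constant k < {k:.3g} s^-1")
```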
South Korea suggests joint probe with North Korea on shooting of South Korean official South Korea urged North Korea on Saturday to further investigate the fatal shooting of a South Korean fisheries official and suggested it could be an unprecedented joint probe by the two sides, as public and political outrage over the killing grew.
Octopress feed, updated 2012-07-29T20:53:01+08:00 — http://diary.archangelsdy.com/
2012-07-29 — Cisco IPSec VPN for Debian (http://diary.archangelsdy.com/blog/2012/07/29/cisco-ipsec-vpn-for-debian): Unsatisfied with OpenVPN's slow connection speed, I recently set up a Cisco IPSec VPN. Its awesome speed proved it to be the right choice. Here are brief steps on Debian. Reference.
2012-04-24 — python -u (http://diary.archangelsdy.com/blog/2012/04/24/python-u): Python will buffer stdout and stderr by default. If you want to redirect the output into a separate file, use 'python -u' instead.
2012-04-22 — crontab tips (http://diary.archangelsdy.com/blog/2012/04/22/crontab-tips)
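The second entry above ('python -u') has a few in-code alternatives worth noting. A minimal sketch (standard library only; sys.stdout.reconfigure requires Python 3.7+) of getting prompt output into a redirected file without the -u flag:

```python
# Alternatives to `python -u` for getting output into a redirected file promptly.
import sys

# Option 1 (Python 3.7+): switch stdout/stderr to line buffering at startup.
sys.stdout.reconfigure(line_buffering=True)
sys.stderr.reconfigure(line_buffering=True)

# Option 2: flush explicitly on the prints that matter.
print("progress: step 1 done", flush=True)

# Option 3: set PYTHONUNBUFFERED=1 in the environment (e.g. in the crontab
# entry) so every Python process started there behaves like `python -u`.
```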
Deterministic Nanoassembly of Quasi-Three-Dimensional Plasmonic Nanoarrays with Arbitrary Substrate Materials and Structures. Guided manipulation of light through periodic nanoarrays of three-dimensional (3D) metal-dielectric patterns provides remarkable opportunities to harness light in a way that cannot be obtained with conventional optics yet its practical implementation remains hindered by a lack of effective methodology. Here we report a novel 3D nanoassembly method that enables deterministic integration of quasi-3D plasmonic nanoarrays with a foreign substrate composed of arbitrary materials and structures. This method is versatile to arrange a variety of types of metal-dielectric composite nanoarrays in lateral and vertical configurations, providing a route to generate heterogeneous material compositions, complex device layouts, and tailored functionalities. Experimental, computational, and theoretical studies reveal the essential design features of this approach and, taken together with implementation of automated equipment, provide a technical guidance for large-scale manufacturability. Pilot assembly of specifically engineered quasi-3D plasmonic nanoarrays with a model hybrid pixel detector for deterministic enhancement of the detection performances demonstrates the utility of this method.
Karin Zarifi (Netherlands, 2002, Law) “I am convinced that my experience at HLS gives me access to any career I may dream of. (…) Even though I am thrilled for now, I do anticipate wanting to change careers in the future and trying my luck again at a career in Europe, perhaps even with an international organization. One never knows where life may lead us, but I do know that Harvard takes it to great places.”
Pulis relishing Holloway meeting Stoke, Latest news update: January 04, 2013 3:46 PM Tony Pulis will face one of his oldest friends in football when Stoke travel to Crystal Palace in the third round of the FA Cup. Pulis and Palace boss Ian Holloway have known each other for almost 30 years since their youth days at Bristol Rovers and Holloway is godfather to Pulis' son Anthony. "I've known Ian since he was 12 years old," said the Stoke boss. "His mum and dad were lovely people and we knew their family really well. They're typical Bristolians really, and it's lovely to see him do so well. "He's done fantastic, not just as a manager but his playing career was fabulous, especially the back end of it when he was playing for QPR in the Premier League." The pair were in opposite dugouts twice in the 2010/11 season when Blackpool, who Holloway managed until joining Palace in November, spent a year in the Barclays Premier League. Holloway had the upper hand on those occasions, with Blackpool picking up four points and Stoke one, and he could well return to the top flight in August. Palace sit third in the npower Championship table, two points adrift of the automatic promotion places, but Pulis was keen to praise the work done by former manager Dougie Freedman. He said: "I think Ian will be the first one to say this, you've got to give the previous manager a lot of credit for putting the team together. "Ian's gone in there and I think the majority of it has been the same, Ian's just kept it going. But if they get promoted, it'll be a fantastic achievement for Ian."
Antitumor activity and pharmacodynamic properties of PX-478, an inhibitor of hypoxia-inducible factor-1alpha. The hypoxia-inducible factor-1 (HIF-1) transcription factor is an important regulator of tumor responses to hypoxia, which include increased angiogenesis, glycolytic metabolism, and resistance to apoptosis. HIF-1 activity is regulated by the availability of the HIF-1alpha subunit, the levels of which increase under hypoxic conditions. PX-478 (S-2-amino-3-[4'-N,N-bis(2-chloroethyl)amino]phenyl propionic acid N-oxide dihydrochloride) is an inhibitor of constitutive and hypoxia-induced HIF-1alpha levels and thus HIF-1 activity. We report that PX-478 given to mice suppresses HIF-1alpha levels in HT-29 human colon cancer xenografts and inhibits the expression of HIF-1 target genes, including vascular endothelial growth factor and the glucose transporter-1. PX-478 shows antitumor activity against established (0.15-0.40 cm(3)) human tumor xenografts, with cures of SHP-77 small cell lung cancer and log cell kills up to 3.0 for other tumors including HT-29 colon, PC-3 prostate, DU-145 prostate, MCF-7 breast, Caki-1 renal, and Panc-1 pancreatic cancers. Large (0.83 cm(3)) PC-3 prostate tumors showed 64% regression, which was greater than for smaller tumors. The antitumor response to PX-478 was positively correlated with tumor HIF-1alpha levels (P < 0.02) and was accompanied by massive apoptosis. The results show that PX-478 is an inhibitor of HIF-1alpha and HIF-1 transcription factor activity in human tumor xenografts and has marked antitumor activity against even large tumor xenografts, which correlates positively with HIF-1alpha levels.
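For readers unfamiliar with the "log cell kill" figures quoted in the abstract, the conventional reading is that each log of kill corresponds to a ten-fold reduction in surviving tumor cells. The short Python sketch below is my own illustration of that relationship, not an analysis from the study.

```python
def surviving_fraction(log_cell_kill):
    """Fraction of tumor cells surviving a treatment that achieves the given
    log cell kill; each log corresponds to a ten-fold (90%) reduction."""
    return 10.0 ** (-log_cell_kill)

if __name__ == "__main__":
    for lck in (1.0, 2.0, 3.0):  # 3.0 is the maximum quoted in the abstract
        frac = surviving_fraction(lck)
        print(f"log cell kill {lck:.1f} -> {frac:.3%} of cells survive, "
              f"{1.0 - frac:.3%} killed")
```

On this reading, a log cell kill of 3.0 corresponds to roughly 99.9% of the tumor cells being eliminated.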
Payment Successful bidders will be emailed an invoice the Tuesday after the auction which will include shipping info. Please do not try to pay until receiving your invoice. Online invoices cannot even be created until the entire auction is closed out and then shipping must be added in manually. Please make arrangements to pay your invoice within 5 working days as Slotin Auction needs to collect quickly in order to pay consignors. Accepted payment methods Invoicing This is a two-day sale and the shipping prices must be added in manually at the end of the sale. The invoices will be emailed to you by Tuesday after the sale (May 1st). There will be a separate invoice sent for each day of the sale (April 28th sale) and (April 29th sale). Terms NOTE: Mail checks to: 5619 Ridgetop Drive Gainesville, GA 30504 Auction Hall: 112 E. Shadburn Ave. Buford, GA 30518 Slotin Folk Art Auction - GAL #2864 & #2784 TERMS AND CONDITIONS OF SALE 1. All sales are final. 2. Only Bidders with an online BID HISTORY (or who are known to Slotin Auction) will be approved to bid online for this auction. All other bidders will have to contact Slotin Auction directly to bid. 3. PLEASE DO NOT TRY TO PAY FOR YOUR PURCHASES UNTIL AFTER RECEIVING THE FINAL INVOICE WITH SHIPPING INFO. Internet bidders will be emailed invoices with shipping prices by Tuesday after the auction (Nov. 14). Invoices are not even produced until the whole auction is closed out and shipping must be added in manually. 4. Buyers pay Slotin Folk Art Auction a premium of 25% of the Lot's Hammer price. 5. Slotin will accept checks and credit cards. (The "Sales Price" is the Hammer Price plus the Buyer's Premium, plus applicable taxes). 6. Title to Lot and risk of loss or damage to the Lot passes to the Buyer when the Hammer falls. Slotin Auction shall have no liability for any damage to property left on its premises after the date of sale. Buyers with an outstanding balance after 14 days of the close of the auction will be charged a ten percent (10%) late fee on the Sales Price. Slotin will have sole and absolute discretion to determine who the successful bidder is and/or may withdraw the Lot or re-offer the Lot for sale, in the event of a dispute. 7. All property is sold "AS IS." All items are on public display for buyer preview prior to and during the auction and bidders are encouraged to inspect pieces before bidding. Call or email for condition reports the week of the sale: 404 403-4244 or auction@slotinfolkart.com. Slotin, for itself and as agent for the seller, makes no warranties or representations of any kind with respect to any Lot. Buyer agrees that in no event shall Slotin be responsible for the correctness, description, genuineness, authorship, attribution, provenance, period, culture, source, origin, value or condition of any Lot. Nothing being said or done by Slotin shall be deemed a warranty or representation or an assumption of liability by Slotin. 8. Bidding on any Lot is acceptance of these Terms and Conditions and any and all other terms announced at the time of sale. These Conditions constitute the entire agreement between bidders and Slotin and supersede all prior agreements between them, if any. 9. SHIPPING: Slotin Folk Art Auction will be glad to pack and ship your auction purchases for you. We ship via UPS Ground. The shipping charges are listed in the catalog for Continental U.S. shipments only. Insurance is optional AND may be purchased for an additional 2%.
The shipping costs are listed with each item in the catalog description and will be included in your emailed invoice the day after the auction. Please allow 3-4 weeks for delivery - especially on the larger items! International packages will be handled by the UPS Store. 10. BIDDING LIVE ONLINE DURING THE AUCTION: Online bidders may submit absentee bids in advance of the sale or bid live along with the audience during the sale. If you are experiencing trouble bidding online and would like a live phone line, call: 404 403-4244. Of course, the best way to assure getting the desired pieces is to attend the sale in person. Since this is not possible for all bidders, the next most reliable ways to bid are to be live on the phone or to submit an absentee bid through the auction house. Bids executed in this manner are almost 100% accurate. The process of bidding live on the internet is convenient, but not perfect. There is a live person typing the auction info in as quickly as she can while executing thousands of live internet bids. She will attempt to capture the bid history as accurately as possible, but in the event of a discrepancy, the auctioneer's records are the official records of the auction. In every auction of this size there will be a couple of lots where an online bidder is declared the winner when in fact it is an in-house bidder or phone bidder who has won. This is simply the result of the fact that a human is typing in the bids and accepting online bids and in a long auction day, with thousands of bids executed correctly, she will inevitably miss a bid that should go to the online person or accidentally accept a bid when it should go to an in-house person. Generally this happens to 3-5 bidders in an auction. We post the official sales results on our website the week after the auction. Bidders may place absentee bids or bid in real time along with our audience via LIVEAUCTIONEERS. If you place an absentee bid in advance of the auction, your bid will be executed competitively along with the audience on auction day. We as the auction house never get to see your maximum bid, only the increments that are submitted by liveauctioneers. Sometimes, an item can close before an increment by an online bidder is submitted and we are unaware that the bidder would have gone higher to obtain the piece. Since this is a live auction, bids placed in advance of the auction are not given any more priority than bids submitted when the lot comes up for sale. Absentee bids are submitted in increments by LIVEAUCTIONEERS along with the live audience. The Buyer's commission is 25%. You may view the auction live for no fee.
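As a quick illustration of how the charges described in these terms combine, here is a minimal Python sketch that estimates an invoice total from a hammer price using the 25% buyer's premium, the optional 2% insurance, and the 10% late fee mentioned above. The base to which tax and insurance apply is my assumption, the input figures are made up, and the actual invoice is of course prepared by the auction house.

```python
def invoice_total(hammer, tax_rate=0.0, insured=False, late=False):
    """Illustrative estimate of an invoice under the quoted terms.
    Assumes tax and the optional 2% insurance are applied to the Sales Price
    (hammer price plus 25% buyer's premium); the terms do not spell that out."""
    sales_price = hammer * 1.25            # hammer price + 25% buyer's premium
    sales_price += sales_price * tax_rate  # applicable taxes, if any
    total = sales_price
    if insured:
        total += 0.02 * sales_price        # optional insurance, additional 2%
    if late:
        total += 0.10 * sales_price        # 10% late fee after 14 days
    return round(total, 2)

# Example: a $1,000 hammer price with insurance, no tax, paid on time.
print(invoice_total(1000.0, insured=True))  # 1275.0
```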
ST. PETERSBURG, Fla. (AP) - Matt Moore became the first Rays pitcher to win his first four starts of a season, Ryan Roberts homered twice and Tampa Bay beat CC Sabathia and the New York Yankees 5-1 on Monday night. Moore (4-0) allowed one run, two hits, three walks and had nine strikeouts over eight innings. Roberts connected in the first and third to help the Rays take an early 5-0 lead. Sabathia (3-2) had a three-game winning streak end. The left-hander gave up five runs and seven hits over seven innings. Tampa Bay has won 12 of its last 14 home games against the Yankees dating back to July 2011. Kyle Farnsworth completed the two-hitter.
April is Donate Life Month Alright alright. Not the most uplifting topic, I know. But it can be. And I want to educate you on the importance and relevance of this topic, as well as encourage discussion within your family on the topic of…organ donation. Before you stop reading, PLEASE just hear me out. I understand this is a topic that most people don't like to think about. But, it can be even more difficult when you're faced with a decision down the road that you haven't put any thought into. I have a lot of experience seeing this decision emotionally destroy families because it was something they had never talked about. And because of the lack of conversations about organ donation, thousands of lives are lost that could have been saved. Let's start with some stats. More than 119,000 Americans are waiting for a life-saving transplant. Every ten minutes, someone is added to this list. And unfortunately, 8000 Americans a year (22/day) die waiting for a transplant. About 54% of Americans are registered organ donors. We need that percentage to go up. Are you in that 54%? So here comes the truth. YOU….die. Yes, it's true. You actually will die. Very weird to think about, I know. But it happens, and it happens to every one of us. Sometimes it will happen young, hopefully it happens when we're older. But that's not our decision to make. What is our decision is what we do with our bodies after death, and how we choose to manage our bodies when we are hospitalized. My hospital lights up in pretty blue and green lights for the month of April, in honor of Donate Life month 🙂 As an ICU nurse, I see death a lot. It's never easy, and it's always hard to comfort families during this difficult time. One thing you can do now to decrease that stress is to have a conversation about your end-of-life preferences. One of the biggest issues families face when a family member is dying is that they never talked about what their preferences were. This is a conversation that should happen in the home, not in the ICU. You think – Oh, I'll just talk about those issues when the time comes. Well guess what. The majority of the time, nobody knows when they're going to get sick! Or hit by a car. Or have a heart attack. Or fall off a cliff (cough). And if you're unable to speak? Does your family know what you would want in that situation? Are you comfortable with a ventilator helping you breathe, a feeding tube in your stomach? CPR? (which FYI = hard chest compressions possibly breaking ribs and causing more complications if you survive). This is TOUGH STUFF! Stuff that no one normally thinks about. You need to contemplate what you would want if you do end up that sick. And if you don't make it. I'm really not trying to scare you, I just want you to be able to make that choice, so your family doesn't have to. Okay, so if you're still with me here…I promise it's going to get more positive! I am writing this post as National Healthcare Decisions Week is April 16-22, in addition to Donate Life month being April. One more stat: 90% of people agree that discussing end-of-life decisions with family is important. 27% have actually done so. That means that 73% of people are leaving a major decision up in the air, which is so distressing to family members when the doctor asks them: "what would they have wanted?" And unfortunately, when family doesn't know the patient's choices, they usually opt out of organ donation, which could give life to others who cannot survive without a transplant.
Here's some organ donation info: ANYONE can potentially be an organ donor. It depends on your diagnosis and medical history at the time. Don't let any past medical history keep you from registering. Register, and the physicians will decide at the appropriate time. All major religions support organ donation as a compassionate choice. Organ donation is free to the donor. You can still have an open casket funeral. Donation is only considered AFTER brain death is declared. A lot of people worry that your medical care will be different if you are a registered donor. Your life is the number one priority of medical staff when you are a patient. They will do everything and anything they can to keep you alive, registered donor or not. The conversation to donate is only an option and brought up AFTER you have been declared brain dead, which is irreversible. Nobody will give up on you just so they can get your organs. It's not like that at all. I hope all of my readers out there live long, happy and healthy lives. The truth is, all of us will die eventually. Do you want to just die, or live on through the most generous gift of all? Register to be a donor, and save up to 8 lives.
The Strength of the Pack is the Wolf East's Top-Scoring Reserve Looks to Expand His Game When you’ve been labeled “a wolf” by your head coach, it’s a tough nickname to live down. Jordan Clarkson just flashes a shy smile and looks away when he’s confronted with his new nom de guerre. But he knows that his game fits the lupine bill – and he’ll be ready to bark at the moon when the regular season rolls around in exactly two weeks. The origin story unfolded on just the third day of Training Camp. With rookie Dylan Windler on the shelf through the preseason, Coach John Beilein made the decision to move Clarkson over to play some 3 when he goes with a small backcourt. ”You've got to have a wolf mentality as a small forward in some of the things we do,” praised Beilein. “There's not a better wolf on the team than Jordan Clarkson. He's huntin'. And we need a hunter at that position.” The Clarkson experiment worked well in the Cavs preseason opener against San Lorenzo. Although Clarkson seemed even less concerned about the position label than his head coach. ”I’m just out there, trying to play-make and score – that’s my job,” smiled the sixth-year man. “Three, two, one, four, over in the corner, up top, back screens, (I’m just) getting the ball and finding a way to impact the game.” Clarkson has made an impact on the game from almost the minute he arrived via a trade with the Lakers at the 2018 Trade Deadline – arriving alongside Larry Nance Jr., with Cleveland surrendering Channing Frye, Isaiah Thomas and a 2018 first rounder that turned out to be Moritz Wagner. In his first game with the Cavs after the deal – a blowout win over the Celtics on the day they retired Paul Pierce’s jersey – Clarkson scored 17 points, going 7-of-11 from the floor, including 3-of-4 from long-range. And he’s barely slowed down since. Jordan Clarkson established himself as one of the top sixth men in the history of the Cavs franchise. Photo: David Liam Kyle/NBAE/Getty Images In his first full season with Cleveland, Clarkson was outstanding from start to finish – leading all Eastern Conference reserves in scoring, netting 72 games in double-figures and missing just a single contest in a season in which the squad was consumed by the injury bug. Clarkson put his name on the map last year, but he’s been the model of consistency for some time now. Over his last four seasons, the 46th overall pick of the 2014 Draft has averaged 15.2 points and 81 games played. He averages double-figure scoring in every arena in the league. And he’s ready to pick up where he left off. ”I did a lot of similar stuff to what I did last summer in terms of working out on the court, my diet,” said Clarkson. “I feel like last year I was in the best shape I've been in since I've been in the NBA. So, this summer I did a lot of similar stuff.” If Clarkson plans to fulfill Coach Beilein’s vision for him, he’ll take even more pounding than he takes running through screens as a 2-guard – and that’ll mean more time in the weight room. But through his first five NBA seasons, his durability is unquestioned. "You know, since my rookie year, I've been kind of put in a box that I'm just a scorer – and I can do that, too – but it's just about finding a balance." Jordan Clarkson, on becoming more versatile this season ”The year before last year – well, actually, all my years before I came here – I wasn't really big on the weight room,” said Clarkson. 
“So just being able to get in the weight room, trying to convert my body and just be able to take that beating and be ready to bounce back and to have my muscles intact for that. It's always about being able to bounce back because you've got to be able to do that on a consistent basis. And the weight room really helps me with that.” Only the Clippers’ Lou Williams – the NBA’s three-time and reigning Sixth Man of the Year – scored more points (1,485) than Clarkson last year, with the former Tulane standout becoming just the 19th player in NBA history to score more than 1,300 points off the bench in a season. Clarkson was also the major reason Cleveland ranked 6th in the league in bench scoring, averaging a franchise-record 42.6 points per game. In a triple-overtime thriller against Brooklyn just before the All-Star Break, Clarkson notched a career-best 42 points – drilling a season-high seven bombs in the loss. It was the most points scored by a reserve in franchise history (topping Phil Hubbard’s 37), the first 40-point night of his career and the most by any Cavalier last season. In another game against the Nets, he became the first Cavs reserve to net at least 20 points and 10 boards in less than 30 minutes off the bench since 2014. Overall, Clarkson averaged a career-best 16.8 points – 3rd best among all NBA bench players – topping the 20-point plateau on 25 occasions. After his career year, Clarkson didn’t take his foot off the gas this summer and looks to add some new tools to his repertoire this season. ”(This year), just to try to score with less dribbles – being able to play off the ball, being able to play off the catch; catching and driving immediately or making a play for my teammates,” listed Clarkson. “With the new system that coach is putting in, it gives me a lot more opportunity to make plays in terms of getting other guys shots instead of just me scoring.” Aside from moving over to play some 3, Clarkson wants to prove that he’s more than just a scorer. ”I think it's a good system we've got in place, and I feel like I'm going to be able to succeed, because I can pass the ball – I think I've proven that,” said Clarkson, adding: “You know, since my rookie year, I've been kind of put in a box that I'm just a scorer – and I can do that, too – but it's just about finding a balance.” Last year, Clarkson etched his name into franchise history – eclipsing the great Campy Russell as the all-time single-season leader in points off the bench. But he still came up just short against arguably the greatest sixth man in team history – Hot Rod Williams – who averaged 16.87 points per in 1989-90 compared to Clarkson’s 16.84 this past year. The lithe swingman also sees some of last season’s success as an extension of his outstanding play in the Asian Games in August of last year, suiting up for the Philippines for the first time. (His mother is of half-Filipino descent and Jordan holds dual citizenship.) Clarkson scored at least 20 points in each game and led the Philippines to its best finish in 16 years. ”The (international) game is really different, the physicality is different, you don't get as many calls,” said Clarkson. The game is just totally different. You really have to be able to play without the ball and really rely on your teammates to get you shots because they're clogging the paint. 
”I think it really helps your development, to be honest with you -- just on how to play and how to get open without the ball.” Still just 27 years of age, Clarkson finds himself as one of the young squad’s seasoned vets. And he’ll have a chance to pass down some NBA wisdom to Cleveland’s collection of youngbloods. ”I’m probably more like an older brother; I wouldn't say 'a mentor' because I'm still learning too,” said Clarkson. “But definitely like an older brother, especially like for Darius, even though he's already got a really good feel for the game. He and I have talked about where he wants me to be on the floor and where he wants to be. We want this to be successful for him and successful for me.” Coach Beilein would love a few more wolves like Clarkson in his pack. But for now, he’s got an alpha male who’s ready to start hunting right where he left off last year.
Q: The following mesh/grid like diagram in tikz I need to create a picture as shown in the attachment. I know how to create the hexagonal substructure and the code for the same can be found in this question: Hyperbolic polyhedron in tikz Please help me out as I'm new to tikz. Edit: The function which maps the subdivided icosahedron to the plane behaves like $z^{6/5}$ at the vertex of degree 5 (hereby denoted by $v$). A: You can draw such grids with \foreach loops. \documentclass[tikz,border=3.14mm]{standalone} \usetikzlibrary{shapes.geometric,calc} \begin{document} \begin{tikzpicture}[web/.style={append after command={foreach \XX in {1,...,#1} {(current.corner \XX) -- (current.center)}},regular polygon,regular polygon sides=#1,minimum size=1cm,draw,alias=current}, pics/outerior/.style={code={ \pgfmathtruncatemacro{\idiv}{pow(2,#1-1)} \draw (90:#1*0.5) foreach \XX in {1,...,5} {-- (90+72*\XX:#1*0.5) coordinate (P-\XX)}; \draw (90:#1*0.5+0.5) foreach \XX in {1,...,5} {-- coordinate[midway] (M-\XX) (90+72*\XX:#1*0.5+0.5) coordinate (Q-\XX)}; \foreach \XX [remember=\XX as \YY (initially 5)] in {1,...,5} {\draw (P-\XX) -- (Q-\XX); \foreach \ZZ in {0,...,#1} {\draw ($(Q-\XX)!{(\ZZ+1)/(#1+1)}!(Q-\YY)$) -- ($(P-\XX)!{\ZZ/#1}!(P-\YY)$) -- ($(Q-\XX)!{\ZZ/(#1+1)}!(Q-\YY)$) ;} } }}] \draw (0,0) node[web=5] (c5) {}; \clip (-3,-3) rectangle (3,3); \draw foreach \X in {1,...,9} {(0,0) pic{outerior=\X}}; \end{tikzpicture} \end{document} P.S. Your prescription does not seem to yield the drawn lattice when using regular polygons (and also to introduce polygons that automatically have the internal lines added via append after command. \documentclass[tikz,border=3.14mm]{standalone} \usetikzlibrary{shapes.geometric,calc} \begin{document} \begin{tikzpicture}[web/.style={append after command={foreach \XX in {1,...,#1} {(current.corner \XX) -- (current.center)}},regular polygon,regular polygon sides=#1,minimum size=1cm,draw,alias=current}] \draw (0,0) node[web=5] (c5) {}; \foreach \X [remember=\X as \Y (initially 5)] in {1,...,5} {\draw let \p1=($(c5.corner \Y)-(c5.corner \X)$),\n1={atan2(\y1,\x1)} in ($(c5.corner \Y)!0.5!(c5.corner \X)$) coordinate (aux) ($(aux)!{-(1/2)*1cm}!90:(c5.corner \X)$) node[web=6,minimum size=6cm/5,rotate=\n1]{};} \end{tikzpicture} \end{document}
The efficiency of Australia's ports is critical to our efforts to broaden Australia's sources of economic and jobs growth in the wake of the end of the investment stage of the mining boom. At a time of economic transition, we must ensure that our railways, roads and ports have sufficient capacity to support non-mining sectors, particularly in regional areas. Businesses crave logistical certainty. That's why 2017 is the right time to lift investment in transport infrastructure, a point made repeatedly by business leaders and economists including Reserve Bank governor Philip Lowe and his predecessor, Glenn Stevens. We could start by getting on with the long-proposed Inland Rail project linking Brisbane and Melbourne, which would significantly boost capacity through our nation's eastern agricultural heartland. The former Labor Government invested $600 million improving those parts of the existing lines that would form part of Inland Rail and left $300 million in the Budget for further work. But since 2013, not a single sleeper has been laid. That's not good enough. In NSW we need to increase the capacity of Port Botany by completing the duplication of the Port Botany Rail Line as well as the Maldon-Dombarton rail link, which would connect Port Kembla and south-west Sydney. But as important as it is to lift investment, that investment must meet long-term strategic imperatives. If the wrong projects are funded, it will divert funds from productivity-boosting projects. The Perth Freight Link was announced in the 2014 Budget, without the benefit of a detailed plan or any cost-benefit analysis of the project. While the stated aim of this project is to take trucks to the Port of Fremantle, planning has been so chaotic that on the current design, the road would stop 3km short of the port. That's not good enough either. But the broader strategic issue is that the Port of Fremantle will reach full capacity within just a few years. Rather than building the Freight Link, we should focus on port capacity in the long term. It makes more sense to focus on the development of the Outer Harbour proposal, along with investment in public transport in Perth to take cars off the roads and deliver productivity gains across the entire economy. Strategic planning is critical. Last November, in his annual address to Parliament on infrastructure, the Prime Minister announced a plan to develop a strategy to increase the productivity and efficiency of Australia's freight supply chain. This is unnecessary. The work has already been done. Under the former Federal Labor Government, Infrastructure Australia and the National Transport Commission were tasked with consulting with industry, as well as the states and territories, to produce the National Ports Strategy and the National Land Freight Strategy. Together with the creation of national regulators, historic shipping reforms and the Labor Government's record infrastructure budget, these strategies provided a strong foundation for rising productivity and faster economic growth. In the globalised world of the 21st Century, the prices consumers pay and the health of businesses depend more than ever on having better, less congested roads, faster, more reliable railways and modern, efficient sea and air ports. The National Ports Strategy, produced in January 2011, promotes better long-term planning on the waterfront, with operators required to publish 15 to 30 year master plans detailing expected growth at their ports and the facilities required to handle that growth.
It also streamlined environmental approval processes and established protocols for better planning around ports, with state and local planning authorities required to implement “buffer” strategies to prevent encroachment on the ports as well as road and rail corridors. The National Land Freight Strategy set out principles for greater focus on an integrated transport system designed to move goods into and out of major ports and around our country quickly, reliably and at lowest cost. These strategies provide a clear framework for moving forward. Anthony Albanese is the opposition spokesman for Infrastructure, Transport and Regional Development.
The Betty White Show The Betty White Show is an American sitcom television series which aired on CBS from September 12, 1977, to January 9, 1978. Fourteen episodes were broadcast. The series was produced by MTM Enterprises. Note: This program should not be confused with two earlier television programs that had the same title—a daytime talk show that ran on NBC February 8, 1954 – December 31, 1954, and a prime-time comedy variety show that ran on ABC February 5, 1958 – April 30, 1958. Synopsis Joyce Whitman (Betty White), a middle-aged actress, lands the lead in a fictitious police series, Undercover Woman (a parody of Angie Dickinson's Police Woman). Joyce is thrilled with the show, but less pleased to learn that the director is her ex-husband, John Elliot (John Hillerman), whom she unfondly refers to as "old pickle puss". He responds in kind, supplying his star with an oversized male double named Hugo (Charles Cyphers), a sexy, much younger onscreen sidekick (Caren Kaye), and dialogue not nearly as sharp as her tongue. Also on hand are Joyce's best friend, Mitzi Maloney (Georgia Engel), co-star actor Fletcher Huff (Barney Phillips) and network penny-pincher Doug Porterfield (Alex Henteloff). Reception The series was scheduled opposite ABC's Monday Night Football and The NBC Monday Movie and failed to generate viewers. The show was canceled after fourteen episodes. Nick at Nite and TV Land briefly reran the show during the 1990s. Cast Betty White as Joyce Whitman, a sharp-tongued actress. John Hillerman as John Elliot, Joyce's ex-husband and director of her show. Georgia Engel as Mitzi Maloney, Joyce's naive best friend/roommate. Recurring Caren Kaye as Tracy Garrett, Joyce's younger, sexier co-star. Charles Cyphers as Hugo Muncy, Joyce's hunky stunt double, who is over-sized. Barney Phillips as Fletcher Huff, Joyce's co-star. Alex Henteloff as Doug Porterfield, a network executive who oversees Undercover Woman. Episodes References External links Category:1977 American television series debuts Category:1978 American television series endings Category:1970s American sitcoms Category:1970s American workplace comedy television series Category:English-language television programs Category:Television series about television Category:CBS original programming Category:Television series by MTM Enterprises Category:Television shows set in Los Angeles
The Argument Against Paying Development Professionals Based on Amount of Funds Raised Few topics generate more heated discussion in non-profit organizations than whether development professionals (staff or consultants) should be paid a percentage of the money raised, receive commission-based compensation, or be paid a performance bonus. Perhaps because it is a practice of giving financial rewards to development professionals contingent upon the achievement of fixed money goals, we can simply refer to it as “contingent-pay.” Whatever you want to call it, two things are becoming more and more apparent. The practice is increasing. The practice is troubling the development profession. Thinking about why we have seen more contingent-pay in recent years, I found myself reflecting on a change I have witnessed in how we development professionals describe and perhaps even think about ourselves. There is a tendency these days to describe our work as fundraising and to call ourselves fundraisers. I have always thought of the volunteers as being the true fundraisers and we development professionals as the people who develop the atmosphere for that fundraising. To some this may seem like an exercise in semantics, but I think it is a great deal more. Many development professionals today enter into consulting agreements or are hired as staff to “raise funds.” Sometimes they even seek to be THE fundraiser for the organization they serve. The result is that these development professionals and their organizations have blurred the once clear difference between the fundraising role of development officers and that of trustees and other volunteer leaders. Many development professionals have become the “fundraisers” for organizations. As a result, contingent-pay methods of compensation have gained acceptance. The argument being, let’s reward people for results and penalize them for poor performance. Contingent-pay becomes an inducement for development professionals to take on the tough job of fundraising and a way for boards to justify turning over to staff or consultants what is essentially a trustee responsibility. To me, the answer to the question of why contingent-pay is so troublesome seems obvious. It is one thing for development professionals to discuss fundraising techniques and philosophies and to strenuously air disagreements. It is quite another to tell people that the way or amount they are paid is unethical. However, the Association of Fundraising Professionals (AFP) takes a strong and unequivocal stand on contingent-pay. Their 1992 position paper developed by the ethics committee states: “Members shall work for a salary or fee, not percentage-based compensation or a commission.” The AFP cites the main consequences of contingent pay: Charitable mission can become secondary to self-gain. Donor trust can be unalterably damaged. There is incentive for self-dealing to prevail over donors’ best interests. AFP, Giving Institute, AHP and other major “for-the-profession” associations tell their thousands of members and all other development professionals not to engage in contingent-pay arrangements. Despite that admonition, many development professionals are not only continuing to do so, but accelerating their acceptance of such compensation schemes. How then do these development professionals deal with the fact that the governing bodies of their profession hold them to be engaging in an unethical practice? 
I believe that very few of those who work for contingent-pay are truly unethical, rather they are guilty of bad judgment. In this instance, as in so many others in our society, individuals fail to follow long-standing codes of ethics because they reject them as tenets of conduct. What were held in the past to be standards to live by, are often viewed today as mere opinions, open to interpretation and argument. This is a societal problem that we see manifesting itself in this instance in the disavowal of strictures against maximizing personal gain while in the pursuit of recognized philanthropic good. The rationalizations are there for anyone who wants to find them. Commonly Voiced Justifications for Contingent-Pay “Sales incentive programs used effectively in for-profit businesses will work as well in non-profit settings. It makes no difference whether you are selling light bulbs or support for symphony orchestras.” Well, I’ve sold light bulbs and I’ve “sold” support for orchestras, and I’m here to tell you there is a difference. Incentive-driven efforts for the sale of commercial products involve an explicit selling and buying environment which customers understand and expect. When we are seeking voluntary charitable contributions we are not working in the same transactional environment. We are not selling to prospective donors; we are presenting them with an opportunity to realize their own desire to contribute to their community and concerns. We are not telling prospective donors to buy our product because it accomplishes something they need at the best value in the marketplace; we are asking them to consider making a gift to something in which they believe and that they want to support. An individual soliciting a gift is involved in a very different transaction from one selling a product. Ask someone if he or she expects salespeople to get a percentage of the price paid for a purchase; ask a donor if he or she expects the person asking for a gift to get a percentage of that gift. I’ll bet you dollars to donuts the answers are different. “The board won’t or can’t raise the money, so we have to do it.” Time and again I have found that this situation occurs because the development professionals either did not know how to present encouraging and workable fundraising plans to board members involving them as the leaders of the effort, or because the development professionals simply chose not to do so. “If we compensate our development professional on a contingent-pay basis we will not have to pay for development efforts that fail.” This is sheer folly. Boards that say they have nothing to lose actually lose everything. Such an attitude assumes failure. “Contingent-pay means that both the board and the development professionals share the risk.” Nothing could be further from the truth. They share the risk when the risk is the same: that the organization will not make its goal. Contingent-pay creates a situation where the board’s risk is that the organization will not make its goal and the development professionals’ risk is that they will not make their money. Still Not Convinced? While I have no problem espousing with vigor the concept that non-profit development professionals should work only for salaries or predetermined fees, I recognize that the emotional baggage that accompanies discussions of pay and ethical behavior can easily cloud the issue. 
Therefore, let me point out some additional pitfalls associated with contingent-pay for both the non-profit organization and the development professional. Abrogation of responsibility The board can be less likely to contribute its time to the fundraising effort. That can leave the development professional out on a limb and the organization with an atrophied board. The future be damned! The development professional will find it hard to justify expending time or effort on work that does anything other than maximize the amount of money to be raised in the identified time period. The organization therefore loses the benefit of a development professional working to build a strong base of committed volunteers for future fundraising and other projects and programs. The organization sacrifices long-term health in order to achieve short-term gain. The “hired-gun” syndrome The development professional sees his/her future as based upon simply the achievement of one short-term goal after another rather than the organization accomplishing its overall objectives. It therefore becomes more likely that the development professional will identify his/her professional identity with his/her track record, not with the organization. Whose “customers” are they? When the development professional leaves the organization, relationships established by him/her will leave also, or at the very least, the history of those relationships will disappear. “Raising the bar”Each time the goal is accomplished and the incentives are paid, the board will have the tendency to view itself as having been too easy on the development professional. They will feel they have been “had,” and their goal setting will evolve toward unobtainable levels in order to make sure they are getting full value. This is not how campaign goals should be set. Money is raised to meet a stated purpose and the “well” is never arbitrarily “pumped dry.” The lucky so and so!Should an unexpectedly large gift come in, the board will not want to give the same percentage to the development professional, creating great potential for ill will and bitterness. Not with my money you don’t! Some foundations, corporations, and private donors will not make a contribution if a portion of the gift is to be paid out as a commission. In some instances they will not give to a campaign that pays any commission at all. Read the fine print in any grant, and be prepared to disclose how the development professional is being paid before accepting somebody’s gift. No, no, no, it’s not worth that much! It is hard to pay a commission on in-kind donations. Who determines the cash value? Are you going by wholesale or retail or what? How do you figure the commission? More opportunity for ill will and bitterness. Promises, promises!Most of the money raised in capital and endowment campaigns comes in the form of pledges, with payments often spanning a number of years. The contingent-pay development professional will want his/her money when the pledge is made. What happens when a pledge is not fulfilled? Does the non-profit organization ask the development professional to return the commission? What happens when a multi-year pledge payment schedule is extended an additional number of years by the donor? Does the non-profit organization expect the development professional to wait for years as the payments are made? It was your fault! 
Development professionals working on a contingent-pay basis are more likely to be viewed by the organization as personally having caused the failure when a goal is not reached. The board does not accept its responsibility, and the development professional is more likely to be fired. No, it was your fault, you misled me! The development professional expects to earn his/her incentives. When he/she doesn't, it creates a personal financial problem. The development professional is likely to blame the organization and move on to another job, even if he/she isn't fired. Why don't I have the chance to make more money too? Contingent-pay is out of line with other compensation practices within a non-profit organization. This can create resentment and lessen the team spirit of staff. In fact, it can create resentment and lead to a failure by other staff to support fundraising efforts. Fair's Fair: What should the rewards be for development professionals of non-profit organizations? Simply, the best market-value compensation that can be managed in the form of annual salaries or reasonable and fair fees paid by the hour, day or for a project. As with other members of any organization's staff, the development professionals should be valued for their contributions to their organizations and for the cost to replace them. It's Your Decision: I believe in the standards that have resulted from thousands of development professionals working to help raise billions of dollars over decades of time. For me, not everything should be a matter of personal opinion; codes of ethics are established through collective wisdom because we do need absolutes by which to live. When I see all the wrong that can befall an organization or an individual in contingent-pay schemes, I cannot imagine for the life of me why either would want to go that route. There is a difference between the ethical selling of light bulbs and symphony orchestras. When I was selling light bulbs, I had the responsibility to make sure my customers got full value for the dollar they spent. When I was "selling" a symphony orchestra, I had the responsibility for helping to keep a community asset healthy and strong for my and succeeding generations. It's that simple. One is about value and the other is about what we value in life. Those are my views on the subject. What are yours? I welcome your comments and suggestions. 19 Comments commission based fundraising would work well for my situation. i am by myself with little connections and poor people skills…yet i have a non-profit nature camp for disadvantaged children that needs funding…i would greatly benefit from a professional fundraiser motivated by commission. could you please direct me to such services?..thank you. Hello Mike, With sincere respect to you personally, and to your obvious hard work and dedication to help disadvantaged kids, were I even to know of "professional" fund-raisers or such firms, I would do you the greatest service in not telling you who they might be. First, they do not advertise. They are not true professionals. When they do surface, and are hired by unwary non-profits, if they raise any money, the average "take" for them is about 75% of the donations. Many times much more. Check out the website of your own state's office of the Attorney General and look for the listing of charities and how they fared during the previous year, especially in relation to hired professional fund-raisers.
I guarantee you will see how such bandits bilk non-profits out of most of any money raised. You read my article regarding the serious consequences befalling an organization which hires a staff development person to himself or herself raise all, or most, of the funds needed by the organization—made even worse when that person works on a commission, percentage, etc., basis from the funds raised. Read again the admonitions, as they are real. Mike, I know it sounds easy for me to say what I am saying from such a distance, but my experience gives me credibility. —Such hired guns will not work best for you. —You should not be by yourself. As a founder/manager of an accredited non-profit, you should (actually legally must) have others serving as a board of directors. You make connections happen. They do not come to you. —With the great and touching work you do, the only "people skills" needed are to let what good you do for the kids speak for you. Have site visits by people whom you want involved. Let them meet the kids and their parents. Will any of the working parents ask their bosses to look into your camp? Identify and invite for visits individuals in charge of funding from area firms and corporations. Search for local charitable foundations and invite the decision makers to see the good you do. Ideas such as that are the ways for you to move forward with the help of others. Hiring a one-time paid solicitor, or even a staff person to raise the money, is absolutely not the way to go. Slice it, dice it any way you want – it's selling. Here's a thought – how about selling the donors–which you claim would not want the fund raiser to receive a commission–on the same concept we all already know….they do! It's all selling and it's all about compensation – be it salary, commissions, etc. Do you think that a donor believes a lackluster fund raiser will keep their job? Do you think a donor wants them to? Do you think a donor wants the end result of the non profit's service to be accomplished? Do you really think there is a difference between selling the light bulb and selling the donor on how their money accomplishes something for the community? Instead of attempting to label selling and compensation as something different than it is…how about you and the ethics committee try to educate donors, if this is such a big deal? To me, every argument you presented–and very articulate at that–can be made for people on salary. As a sales professional when did you stop buying into the idea that all humans are in sales? When spouses, students, employees, or salespeople stop performing or "meeting their quota/expectations" they become divorced from each other. Maybe it's time the old ways of claiming what is ethical be transformed instead of the constant attempt to sell us on the difference between selling and…selling? I enjoy all of your writing. Thanks for the thought-provoking discussion. I've been a salesperson, a manager of sales forces, and the CEO of companies that rely heavily on spirited selling which is then rewarded by commissions that travel upward with performance. I've also asked for charitable donations, headed fundraising efforts, and been CEO of nonprofit organizations. There is a difference between selling products and services and working as a development officer for a nonprofit organization. Fundraising and selling are not the same thing.
Many skills and attributes are shared by development officers and salespersons, but a better comparison could be made between the development officer and the sales manager or in some cases the marketing department. The job of a development officer in a nonprofit organization is to manage and execute some or all of the organization’s fundraising endeavors. Having paid, on-staff professionals ask donors for gifts is neither preferred nor desirable. Gifts should be asked for by a peer of the prospective donor. Peer-to-peer asks yield the largest return. The job of the professional development officer is to create the support and environment that prepares peer-group solicitors to make a strong, convincing, and compelling ask. A donor is not a customer. Making a donation is not the same thing as buying a product or service. Making a donation is choosing to expend social capital in support of something you believe makes society better. People also make gifts because, for business or societal reasons, they want to be associated with the cause or associate with its supporters. Whether you are making a donation out of heartfelt conviction or because you want to establish relationships that can help in climbing the social or career ladder, you will want to be asked for that donation by someone you view as “important.” That someone is not a sales person. It is a peer or someone you would like have consider you as a peer. Some time ago, I was given the responsibility for acquiring donations from CEOs of banks and other companies. As CEO of the nonprofit in search of the donations, I could have attempted to ask for the money from these men and women. Now mind you, they either didn’t know me or were acquainted with me in only the most passing manner. I could have attempted to “sell” them on the cause and the amount. Most likely, I would have been shuffled down to a lower level contributions manager, and I would have received a smaller gift or no gift at all. Instead I chose to have current supporters of my organization who were peers of the CEOs make the ask. Every CEO made a gift and made it within the range of what we wanted to acquire from them and their companies. That’s fundraising! And it’s not selling. It’s managing. Yes, there are small-gift solicitation efforts that do not function on a peer-to-peer basis. I have managed such an effort that was responsible for collecting tens of millions of dollars a year, and no one who was “selling” was involved. These efforts were about marketing and technology support not sales. Think database marketing and fulfillment skills. Do I believe that good development officers should be rewarded based on the success they achieve? You bet I do. But I do not believe in making that reward a commission. Annual review and raises are the way to go. Add to that development professionals moving from smaller to larger organizations capable of paying at a higher scale and you have a system of fundraising compensation that has worked well. Finally, a person who is motivated entirely by money–I believe you said, “It’s all about selling and it’s all about compensation – be it salary, commissions, etc.”–is not someone well equipped to be a development officer of a nonprofit organization. When it comes to raising money for a nonprofit the most successful development officers are more committed to the mission of their organization and a belief in the value of the philanthropic sector than they are to defining themselves by how much money they earn. 
I hope I haven’t sounded too pugnacious in my defense of these points. I do realize that these issues are a topic of discussion that is receiving more attention today than it did a decade or two ago. However, after considerable thought and having walked both sides of the street–and around the block for that matter–I am still convinced of the differences between selling and fundraising and the validity of separate compensation strategies. Thanks for your response – very though-provoking. I have a question – it applies to a more specific case but still on this subject; What if the salary-paid developer isn't able (for whatever reason) to find enough volunteers? He puts into place a monthly membership program which aligns with the non-profit's missions. Would it be feasable/reasonable/ethical for that developer to pay/offer commissions to any paying member who beleived enough in the mission [and wanted to earn extra income] the opportunity to recruit/resale memberships to other people? For the sake of conversation/clarity – say a non-profit gym had a free membership, and also an upgraded $30 monthly membership. Their primary means for raising funds is by allowing any upgraded member to recruit/resale memberships to others for a commission of $10 per month from each member they brought in/sold. Not an MLM but more of a direct sales force promoting the cause of the gym. All the free members, non sales people, etc. benefit from the cause….yet all the "salespeople", in addition to benefitting also get to earn some income. In this sense, the donors are also consumers and beneficiaries of the over cause. Or am I missing some details? If I am missing some details, is there a way to do the example and ensure it's a win/win/win, legal and ethical for everyone? Thanks, James David Patterson on August 13, 2013 at 3:30 pm James, The example you give doesn't seem like fundraising to me. It seems more like recruiting a sales force to sell memberships that have a specific benefit which can be measured in terms of dollars, than asking for donations to a 501c3 nonprofit. I just don't see how this is fundraising as it is understood in the nonprofit world. The very fact that you are offering a quantfiable value exchange would not make the membership a donation. The "donor" would not be able to decare it as a charitable donation. I think the hypothetical gym you suggest would be better served by being a for-profit. I doubt that it could qualify as a 501c3 nonprofit organization. Also, I don't understand the use of the term "developer." That makes the whole proposition sound like a business development to me. That being the case…with the gym example…if it were NP it sounds like you're saying that scenario wouldn't be fundraising, it would be selling/recruting? Which all sounds good to me…or am I missing some details? I'd enjoy emailing you with my specific case which is similar to this if you're available. Thanks, James David Patterson on August 20, 2013 at 12:40 pm James, If a nonprofit organization provides benefits to donors that have material value, the value of those benefits cannot be declared as a tax deduction by the donor and the nonprofit has to declare to the donor the amount of that value and that it is not tax deductible. If your gym qualifies under IRS rules as a nonprofit, then any amount that a member would contribute over and above the value of the membership or any other quantifiable benefit would be tax deductible. 
All of this would depend on your gym qualifying as a nonprofit under IRS rules and the value of the gym membership that you would be awarding to donors. As far as the whole issue about paying by commission goes, I stand by my earlier statements and arguments about what is ethical best practice for a nonprofit. Thank you for your kind words about our exchanges. I work on this website as a gift to the nonprofit community, and the type of exchange you are suggesting is more in line with how I earn my living. This is not a project I could take on at this time. I suggest you find a consultant in your community or seek advice from those in your community working in philanthropy. A place you might start is with your local community foundation. Many communities also have chapters of professional societies for development pros. Jack Benson on June 10, 2013 at 6:04 pm With commission based fundraising, the benefits to a Not-For-Profit can be enormous. Especially for an NFP with limited resources that cannot afford the upfront fees associated with traditional fundraising fee structures. Lawyers have embraced this practice for years, taking 30-40 percent on contingency — AND IT IS CONSIDERED ETHICAL. Like lawyers, if the fundraiser doesn't produce, they get nothing. If they both only make a phone call to produce big results, it's a windfall. Other times, it may take more work than the contingency/commission covers. But over time, it balances out. Jack, one big difference is the relationship between a donor and the organization to which he/she makes a donation. The donor has the rightful expectation that, as close as possible, the money given will go to support the work of the organization. Exorbitant charges such as the 30 to 40 percent you suggest would drive donors away. Fundraising expense of that magnitude is considered unethical in the nonprofit world and is condemned by all organizations monitoring ethical fundraising. Fundraising for nonprofit organizations is about more than getting money out of prospective donors. It is also about fulfilling a donor's charitable intent. Jack, To my way of thinking, the biggest difference is that the lawyer is working totally on behalf of her or his client. In non-profit fund-raising, there is always a two-pronged approach: being true to the organization's mission, and at the same time, giving donors the opportunity to give money to something in which they believe and that they want to support. I believe that your favoring of contingent-pay is driven more by the limited, or unavailable, salary-paying resources of a non-profit, rather than by any good argument in support of what is almost always a failing practice. You have read in my article that I have a long and unyielding stand against any form of contingent-pay in the non-profit sector—having an organization's staff development officer working for compensation based on a percentage of funds raised, a bonus, or a commission. Such arrangements, and any variations, are denounced by major "for-the-profession" associations. They go so far as to state, emphatically, that contingent-pay is unethical. Most development professionals themselves think it's a bad idea. But I do far more than just cite high standards and strong ethics as good reasons to have nothing at all to do with the contingent-pay practice. (Though they could stand alone.) I let the contingent-pay principals know of the very real harm possible when working in that way.
In my article on the subject I list a number of very real and damaging consequences that may befall both parties when working under such an arrangement. Read them again, and I hope you will be convinced that they far outweigh what you described as benefits enormous in value or quantity. My hard stance against contingent-pay was bolstered even more by a personal experience, several years ago, when I was engaged as a fundraising consultant for a major organization. Sadly, it represents what appears to be an ever-growing issue. During my several months serving the organization, I conceived, developed and produced fundraising plans where there had been none. Annual, endowment, capital, sponsorship, and underwriting campaigns were all fully developed and were being phased into the duties of the organization’s new and first-ever Director of Development … whom I helped hire. The individual was hired on a straight annual salary while I was nearing the end of my consulting term. Soon, the Director of Development was up and running very well and I concluded my consulting engagement. In a routine phone call some months later, just to check in to see how that individual was doing, it became readily clear that the several key development initiatives I had set out for the organization had not progressed much, if at all, except for the Annual Fund. There were no ongoing cultivation activities. Recruitment of a volunteer fundraising team was abandoned. There was nothing in place to ensure opportunities for long-term funding. What was clear was that the Director of Development was dead set only on meeting the Annual Fund goal. Why? Because after I left, the next salary review with management allowed the D.O.D. to work toward a bonus of $5,000, contingent upon meeting the Annual Fund goal by the end of the campaign/fiscal year. Just about all of the warnings I cite in my article were at work in this case. Money was being raised only for this year. There was no thinking/planning for tomorrow. When I see all the wrong that can befall an organization, or an individual, in contingent-pay schemes, I cannot imagine for the life of me why anyone would want to go that route.

What if the $5,000 bonus had been for meeting goals in ALL the areas you developed?

PS on March 21, 2013 at 6:21 pm
We are debating this as an NP: a specific short-term position that is solely responsible for selling a marketing/promotions package as part of a fundraiser. We are looking to develop some business/corporate partners to build our respective lists while providing marketing opportunities to the wider community. We are also approaching prospects to support us through their marketing budgets, not their philanthropic budgets. We would like to have a salesperson in this position and incentivize through commissions. Thoughts?

PS, My first thought would be that you should consider that you are a non-profit organization, balancing what you do for the public good according to your mission, and then give considerable thought to whether the marketing focus you are debating might take you away from that mission and cause problems with your regular support and volunteer base, and the IRS. Forgoing philanthropy and getting into “sales” with a commission-based approach can be risky: if it does not cause problems connected to your non-profit status, it may discourage your volunteer base and donors, who could think that your money needs are being satisfied by what is a commercial endeavor.
There are IRS rules regarding the percentage of income a non-profit is allowed to earn through such means if support from the public in the form of contributions is minimal. If the main thrust of the plan is to offer to sell and promote the products and services of commercial businesses and vendors, and this program will take considerable time and effort, balanced against paying a commission to the staff facilitator, then I would suggest that you think about abandoning the plan. Maybe my article, which is somewhat based on this idea, will be of use: Should Your Organization Sell Products And Services To Raise Money? http://www.raise-funds.com/2001/should-your-organization-sell-products-services-to-raise-money/

Rodney: You have shown me all of the good reasons why you must hire a Director of Development now.
— Spectacular and well-received performances;
— A large expense budget, enough to operate a large and complex organization;
— Five years of successful operation, which means you should have a reasonably sized, appreciative, and somewhat moneyed audience—patrons who need to be asked in the right way for money;
— You are already paying a host of people, so adding one more—a critically important individual—must be done.
Biting the bullet now, and getting a seasoned and capable pro, will help those board members begin to know a good deal about fund-raising, and have them do something about it. You do have income from the performances. You can have working capital from the board members and from those patrons whom you know could give major gifts.

I am on the board of Spectacular Senior Follies, an organization that produces a variety show once a year for four performances. We are going into our fifth year of struggling to keep a very successful venture (attendance-wise) afloat, working with a board whose individual members know nothing about development or fundraising (using the distinction in your article). We have somehow managed to pay most of our bills (which run to six figures) to date, but that's all. No one receives monetary compensation except the creative staff, i.e., the director, the musicians, the wardrobe person, the choreographer, the stage crew, and the lighting and sound people. I don't see how we can keep this up much longer, despite the success of the show from an attendance standpoint. My question is very simple, if not simplistic. How do you pay the salary of a development professional when you have no income or working capital? Thank you for any response and ideas you may have.

Let me add to Tony’s comments one more thought. Development directors and personnel of nonprofits are not in an equivalent position to salespersons. Maybe to a sales or a marketing manager. Development professionals devise and execute plans to take an organization’s fundraising needs to market–that market being donors and potential donors. With the exception of small-gifts and annual campaigns, the people “making the sale” are volunteer solicitors. That is because peer-to-peer requests yield more and larger gifts. As a development director, or even an executive director, you don’t want me asking a person with a net worth of $150 million for a gift of a million dollars. You want another person from his/her economic/business/social set making that ask. Getting back to small-gifts and annual campaigns: in them you rely strongly on tools like direct mail and the internet to generate “sales”–to bring in the donations.

Hello RW: Thank you for your welcome comment.
You said in your last paragraph: “Best of all, there is no risk to the nfp. If the development agent brings in revenue, they get paid, if they do not, they will not be compensated. Their survival is tied to the health and welfare of the nfp.”

There is indeed great risk to the non-profit. No non-profit’s survival should ever be in the hands of any one person to bring in the revenue. It’s far more than just not being compensated when she or he does not bring in revenue. Others on staff will not be compensated as well. Worse, the dozens, hundreds, even thousands of people in need depending on the programs and services of the non-profit are left high and dry. The job of seeing to it that the necessary funds are raised is that of the non-profit’s Board of Trustees. Non-profits are public entities, and as such, belong to the community. Their operation is the responsibility of a volunteer board of trustees who must take on fund-raising as their duty. I appreciate that a for-profit sales rep likewise risks her or his livelihood, including her or his family’s financial security, but should the rep not bring in enough revenue, the company does not go under. The product is still available in the marketplace; the same cannot be said of the food bank that so many people counted upon once the revenue support stream proved insufficient.

Your well-articulated countering argument is appreciated. I know it well, as you can see from my article, which juxtaposes the concepts of a commercial sales transaction and the very different non-profit solicitation of a donation. I came from nineteen years with General Electric marketing light bulbs, then went on to twenty years as Director of Development for The Cleveland Orchestra. So I know well the customer/sales and donor/gift process of each, and I have seen and worked the differences and similarities of each. Note the many similarities, as you have correctly implied; but the marked and stark differences in mission and bottom line must be taken into account, and they make it impossible for a commission compensation arrangement to work for the non-profit fund-raising professional.

Here is an example of the real difference between the sales rep and the development officer. It’s a brief summary of an actual experience I had as consultant for a major organization. During my sixteen months in service, I worked a great deal to develop and fine-tune fund-raising plans where there had been none previously. Annual, endowment, capital, sponsorship, and underwriting campaigns were all fully developed and were phased into the duties of the organization’s first-ever Director of Development. He was hired on a straight salary while I was there. Soon, he was up and running, doing well, and I concluded my engagement. In a routine phone call some months later, just to check in to see how he was doing, it became readily clear that he had not progressed much, if at all, in the development of any fund-raising campaign but the Annual Fund. There were no cultivation activities, no building of a volunteer solicitation team led by the Board, nor any effort expended toward long-term funding. He was just dead set on getting to the Annual Fund goal set out for him. Why? Because after I left, the next salary review had him set to work toward a bonus of $5,000, to be given to him should he meet the Annual Fund goal by the end of the campaign and fiscal year. Just about all of the warnings I give in my article were at work in this case, and were not heeded.
All future development/cultivation plans were abandoned in favor of the immediate reward, and the need to bring in the revenue himself in order to make a living this year. I rest my case. Thanks again for your welcome interest.

Thank you for a well-thought-out and well-written article. I must respectfully disagree. For 24 years I have been a straight commission sales rep, working for “for-profit” companies. Early in my career I made very little and rubbed nickels together to make ends meet. The more I learned, and the more relationships I built, the more my income grew. I have grossed over $400k per year in some years, less than $100k in others. Those of us in this business ride the economic boom and bust, carving out a living and supporting our families the best we can. What is the cost of my efforts to the company? A straight 10% of sales. I am a known quantity and my rate is built into the operating cost of goods. Whether that company has one rep or 100 reps in the field, the cost per sale is still a steady 10%. When I close a large sale, I celebrate; if I close a number of small sales, I celebrate. If I receive an order that is spread out over months or potentially years, I receive my commission upon payment. If I leave a company with receivables on the books, I continue to receive my split of commissions for those months or years, even after I leave. It is a nice bonus in my income that is paid because it was earned. The for-profit world uses independent reps and has built a system that works. Independent reps want these jobs because they are potentially lucrative. We stay on the job for many years because our early investment grows year over year. Non-profits must embrace commission development because it does provide an opportunity for those who give of their time and effort to earn a living wage. It encourages development professionals to continue to build relationships with donors and foundations. It provides a financial base for their effort to become a career, not just a summer job. Best of all, there is no risk to the nfp. If the development agent brings in revenue, they get paid; if they do not, they will not be compensated. Their survival is tied to the health and welfare of the nfp.
Many people don’t know that influenza — the respiratory disease we often call the flu — kills thousands of people each year and sends hundreds of thousands more to the hospital. That’s why the Centers for Disease Control and Prevention (CDC) says most people who are six months or older should get a yearly flu shot. There are many reasons that this is good advice. You can get the disease from someone who doesn’t know they have it and doesn’t have any symptoms. And avoiding the two to three weeks of fever and severe headaches, muscle aches and pains some people will endure this winter makes the flu shot worth your time and trouble. Your decision to get a flu shot will also help protect other members of your community. That includes older people, children and pregnant women. It also includes people of all ages who have health problems like asthma or diabetes. They’re all among those who have the highest risk of developing serious, even life-threatening, complications if they contract the disease. Preparing for the 2017-2018 Flu Season Like they have in previous years, researchers have updated the vaccine. This is to protect you from the flu strains they believe will be the most common during the coming flu season. The result is an effective vaccine you can get from your health care provider or your local pharmacy at low or no cost. In many cases, nearby hospitals or your workplace will also offer flu shots. The vaccine could cause a reaction, but it’s almost always mild. You might have redness, pain and swelling at the injection site or even a brief fever. Both are treatable with an over-the-counter pain reliever. And the vaccine may offer compelling health benefits. In fact, a recent study found that it lowered the odds of a heart attack, stroke or other major cardiac event in high-risk patients by about a third over the year following a vaccination. Keep in mind that it takes about two weeks for the shot to start protecting you. That’s why you shouldn’t wait until the flu season is raging to get your shot. Get it as soon as it’s available in your area. The CDC recommends getting it by the end of October. What If You Still Get the Flu? You may get the flu even if you receive a flu shot, but it’s often milder than if you had not gotten the shot. If you or a family member gets sick, remember these tips: Stay home from work or school for at least 24 hours and until your fever subsides without a fever-reducing medicine. Get plenty of rest. Use over-the-counter medicines to ease symptoms. Talk to your doctor about care for young children. Don’t put it off. Don’t skip the flu shot because you got it last year or because you don’t have time. It will take far less time out of your schedule to get the shot than you’ll spend recovering from the flu. Plan to get your flu shot as soon as it is available so you’ll be protected before peak season hits. We’ve got you covered. “The flu is a serious disease that can lead to hospitalization and sometimes even death, and the best way to prevent the flu is to get vaccinated,” said Dr. Eugene Sun, Chief Medical Officer, Blue Cross and Blue Shield of New Mexico. “The CDC also recommends everyday preventive actions, like staying away from people who are sick, covering coughs and sneezes, and frequent hand-washing, to help slow the spread of germs that cause illnesses like the flu,” Dr. Sun said. 
“In many cases, the flu vaccine and other preventive care services are provided at no cost* to you, so talk to your doctor about getting vaccinated today.” *Preventive services at no cost applies only to members enrolled in non-grandfathered health plans. You may have to pay all or part of the cost of preventive care if your health plan is grandfathered. To find out if your plan is grandfathered or non-grandfathered, call the customer service number on your member ID card.
// Copyright (C) 2020 Igalia, S.L. All rights reserved.
// This code is governed by the BSD license found in the LICENSE file.

/*---
esid: sec-get-temporal.date.prototype.day
---*/

const day = Object.getOwnPropertyDescriptor(Temporal.Date.prototype, "day").get;

assert.sameValue(typeof day, "function");

assert.throws(TypeError, () => day.call(undefined), "undefined");
assert.throws(TypeError, () => day.call(null), "null");
assert.throws(TypeError, () => day.call(true), "true");
assert.throws(TypeError, () => day.call(""), "empty string");
assert.throws(TypeError, () => day.call(Symbol()), "symbol");
assert.throws(TypeError, () => day.call(1), "1");
assert.throws(TypeError, () => day.call({}), "plain object");
assert.throws(TypeError, () => day.call(Temporal.Date), "Temporal.Date");
assert.throws(TypeError, () => day.call(Temporal.Date.prototype), "Temporal.Date.prototype");
Verified Services Mar 11 Good morning, Father! Thank you for my tiny little toaster oven, just big enough to toast myself some homemade granola for my breakfast!! I now have a way to make small amounts of baked goods (anything that can fit on a pie plate) and give some as gifts to my peeps! Yay!! I am just about to walk over to give my adopted family a gift of granola in a little stoneware pot… after I finish this post and get done eating my own granola in yogurt breakfast, lol. Luke 6:37-45 ERV “Don’t judge others, and God will not judge you. Don’t condemn others, and you will not be condemned. Forgive others, and you will be forgiven. Give to others, and you will receive. You will be given much. It will be poured into your hands—more than you can hold. You will be given so much that it will spill into your lap. The way you give to others is the way God will give to you.” Jesus told them this story: “Can a blind man lead another blind man? No. Both of them will fall into a ditch. Students are not better than their teacher. But when they have been fully taught, they will be like their teacher. “Why do you notice the small piece of dust that is in your friend’s eye, but you don’t see the big piece of wood that is in your own eye? You say to your friend, ‘Let me get that little piece of dust out of your eye.’ Why do you say this? Can’t you see that big piece of wood in your own eye? You are a hypocrite. First, take the wood out of your own eye. Then you will see clearly to get the dust out of your friend’s eye. “A good tree does not produce bad fruit. And a bad tree does not produce good fruit. Every tree is known by the kind of fruit it produces. You won’t find figs on thorny weeds. And you can’t pick grapes from thornbushes! Good people have good things saved in their hearts. That’s why they say good things. But those who are evil have hearts full of evil, and that’s why they say things that are evil. What people say with their mouths comes from what fills their hearts. Lord, many times what comes out my mouth does not seem to accurately reflect what is in my heart. I ask you to purify my words, my mind, my heart. Help me to be filled with YOUR goodness on the inside, and to be able to express goodness in a way that people do not misinterpret. Help me to become more skilled at both listening and speaking with kindness and compassion. Amen.
Posts

James Tobin was a leading - perhaps the leading - American neo-Keynesian macroeconomist in the era of Keynesian dominance after World War II that extended through to the early 1970s. Along with growth theorist Robert Solow and micro and trade theorist Paul Samuelson, the three substantially shaped what became known as the neoclassical synthesis, which fused neoclassical microeconomic theory, Keynesian macro theory, and neoclassical growth theory. The macroeconomic component of the neoclassical synthesis is termed neo-Keynesianism. All three received the Royal Bank of Sweden Prize in Economic Science in Memory of Alfred Nobel, with Tobin winning his prize in 1981. Tobin died in 2002, aged 84. Robert Dimand (2014) has written a short book, which is part of The Great Thinkers in Economics series edited by Tony Thirlwall, on Tobin’s economics. For purposes of truth in advertising, it should be noted that Dimand was a student of Tobin’s at Yale University and wro…

Central Banks Going Beyond Their Range -- John Eatwell and John B. Taylor (yep, that's correct). A bit old, but worth reading. They are against lower rates, but I doubt they agree on fiscal and regulatory issues.

I don't normally post about this stuff, but the possible implications of this technological change are far reaching, not just in terms of safety as implied in the video (somewhat promotional; hey, it's WIRED), but also in terms of employment. Jimmy Hoffa is turning in his grave, wherever that is. The original WIRED piece here.

According to the BEA, the advance estimate of GDP growth in the third quarter is 2.9%, which is a significant improvement on the second quarter (1.4%). So maybe there is no recession in the near future (Neil Irwin might be right about that), which does not mean Yellen should hike the Fed rate in December anyway.

Next semester I'll be teaching a senior seminar on the history of central banks. The idea is to blend economic history, history of economic ideas and monetary theory in equal parts. And I decided to post on some of the topics I'll discuss in the class, very much like Robert Paul Wolff's tutorials in his blog The Philosopher's Stone, but probably in a less instructive and interesting way than the ones he posts (see this multi-part tutorial on Marx that starts here, and continues here and here and so on; last of nineteen parts here). In the same spirit, I'll suggest some readings for those interested in the topic. An obvious place to start is with the paper "The changing role of central banks" by Charles Goodhart, or for an alternative interpretation by yours truly go here.

The new Bank for International Settlements (BIS) Triennial Central Bank Survey was published last month. Foreign exchange turnover is down for the first time since the survey started in 1996. As the press release says: "Trading in FX markets averaged $5.1 trillion per day in April 2016. This is down from $5.4 trillion in April 2013." The figure below shows the main results. Not surprisingly, the dollar remained the key vehicle currency, being on one side of around 88% of all trades, while the euro has continued to slide, from approximately 39% in 2010 to 31% now. Also, while the yuan or renminbi is now the most actively traded developing-country currency, its share in global foreign exchange turnover has only risen from 2.2% to 4%.

PS: For those interested, here is an old, but I think still relevant, paper on the dollar after the crisis, and why there should be no fear about its dominant position.
In their recent NYTimes op-ed Paul Volcker and Peter Peterson say:

Yes, this country can handle the nearly $600 billion federal deficit estimated for 2016. But the deficit has grown sharply this year, and will keep the national debt at about 75 percent of the gross domestic product, a ratio not seen since 1950, after the budget ballooned during World War II.

The practical consequence of large deficits and debts, according to them, is that:

Our current debt may be manageable at a time of unprecedentedly low interest rates. But if we let our debt grow, and interest rates normalize, the interest burden alone would choke our budget and squeeze out other essential spending. There would be no room for the infrastructure programs and the defense rebuilding that today have wide support. It’s not just federal spending that would be squeezed. The projected rise in federal deficits would compete for funds in our capital markets and far outrun the private sector’s capacity to save, to finance in…

This paper explores lock-in and lock-out via economic policy. It argues policy decisions may near-irrevocably change the economy’s structure, thereby changing its performance. That causes changed economic outcomes concerning distribution of wealth, income and power, which in turn induces locked-in changes in political outcomes. That is a different way of thinking about policy compared to conventional macroeconomic stabilization theory. The latter treats policy as a dial which is dialed up or down, depending on the economy’s state. Lock-in policy is illustrated by the euro, globalization, and the neoliberal policy experiment.

The persistence of low interest rates has dominated the news, generally in relation to whether the Fed will or will not increase the interest rate by the end of the year. The Economist tried a few weeks ago to put things in perspective, and suggested not only that the current nominal rates close to zero are unprecedented, but it sort of indicated that the negative real rates are also to some extent a new phenomenon. The explanations for low rates can be found here, and the consequences, according to The Economist, here (btw, for them it is a pension crisis; and yeah, just wait, this will be used to call for privatization). I'm not particularly keen, as you know, on the idea of a savings glut as an explanation for the low rates. The reason is much simpler and is associated with the fallout from the previous crisis. But at any rate I just wanted to check the data. They showed the nominal short-term rate in the UK (below), which is not very different from what can be found in other sources us…

Economists for Peace and Security will conduct its 9th annual policy symposium at the Hyatt Regency Capitol Hill in Washington DC on November 14, 2016 to discuss the economic dimensions of the most pressing global security issues and those facing the domestic economy. Following one of the most unusual presidential and congressional elections in US history, three panels of senior specialists will present ideas for improving prospects for peace, and growth with fairness for all Americans.

This is a bit old. The Economic Report of the President was published a while ago. I only looked at it recently, essentially because it has a chapter on the 70th anniversary of the Council of Economic Advisers (CEA).
The report discusses the role of Leon Keyserling, the second chair of the CEA, but the most relevant one in the early period, who, like Eccles at the Fed, tends to be a relatively underestimated and forgotten influence on the rise of Keynesian economics (that's in this chapter). That is enough to make this Report worth reading. But the first chapter (on inclusive growth) tackles the issue of inequality, and not just income, but wealth too. Below are the shares in the wealth distribution for the top 0.1%, 1% and the bottom 90%. It's very clear that while the New Deal compressed the shares of the top groups, the Reagan Revolution has completely reversed the earlier achievements. And it is also clear that inequality is important also at the top, since the 0.1% do so much bet…

New paper. From the abstract:

Prebisch believed that understanding the evolution of capitalist economies over time and in different contexts required a general cycle approach, encompassing all the different areas of economic activity, which he labelled “economic dynamics.” This theory, developed between 1945 and 1949, stemmed from a critique of both neoclassical and Keynesian theories, which Prebisch viewed as static representations of capitalism. It was applied first to a closed economy and then to a centre-periphery context. The theory combined the notion that profit is the driving force of economic activity with a process of forced savings and the idea that the time lag between income circulation (and the resulting demand) and the completion of the production process is the main source of cyclical fluctuations. Prebisch’s dynamics theory, which he never completed, influenced his “development manifesto” (Prebisch, 1950).

Read the full paper here.

New paper on INET. Here is from Lance's conclusion:

... writing in the General Theory after leaving his Wicksellian phase, Keynes said that “... I had not then understood that, in certain conditions, the system could be in equilibrium with less than full employment….I am now no longer of the opinion that the concept of a ‘natural’ rate of interest, which previously seemed to me a most promising idea, has anything very useful or significant to contribute to our analysis (pp. 242-43).” Today’s New “Keynesians” have tremendous intellectual firepower. The puzzle is why they revert to Wicksell on loanable funds and the natural rate while ignoring Keynes’s innovations. Maybe, as he said in the preface to the General Theory, “The difficulty lies, not in the new ideas, but in escaping from the old ones… (p. viii).”

His point is that while there are good reasons to believe in the forces of stagnation, the reasons are not the Wicksellian ones given in New Keynesian models. Worth reading.

By Thomas Palley

I received an e-mail from an undergraduate economics student who was curious about economic policy in Washington, DC. His question says a lot about the current state of affairs. Here it is with my reply.

From: Xxxxxxx Xxxxxxx [mailto:xxxxxxxxxxxxxx@xxxx.com]
Sent: Saturday, October 1, 2016 10:56 AM
To: mail
Subject: Question from an undergraduate

Dear Dr. Palley, I am a first-year undergraduate in economics and political theory, and a longtime admirer of your work. What are your thoughts on how Keynesian/Post-Keynesian ideas are treated in current political discourse? I was in Washington D.C.
recently and I had a conversation with a Brookings fellow who told me that he thought Joseph Stiglitz was an "extremist who isn't taken seriously by anyone who knows their way around the Beltway." Does it worry you that ideas which used to be considered "mainstream" (like social democracy) are now increasingly considered "extreme"? Deeply grateful for your time and attention Sinc…
Here is the overwhelming evidence gathered by the top researchers in an effort to prove Obama's Reptilian bloodline.

Control: Obama has already created an economic dependency upon the government in order to establish a level of control never seen before in the United States. The control of the Auto industry and the Financial System, as well as the Executive, Legislative, and Judicial branches of government set up our society for Reptilian control. Think about it: in the United States of America, has there ever been a desire for a complete control system by any of our leaders?

Missing Birth Certificate: The majority of American citizens would have no problem producing a birth certificate should the need arise. Why would the President of the United States of America, the most powerful man in the world, have to desperately hide his identity? Quite obviously, if Obama's birth certificate was found, his Reptilian bloodline and questionable heritage would be revealed.

Body Language: Experts on body language have noted (although largely silenced by the media) very odd, almost animal-like body language from our President. Notice the way Obama holds his head. The neck is thrust upwards and often to one side or the other (identical to a lizard). Additionally, Obama's walk has a distinctly nonhuman side-to-side sway (you could say slither). Experts have also noted the president's speech patterns as being abnormally rhythmic and musical.

On the off chance Romney is elected Tuesday, I'm going to miss this kind of crazy. I mean, sure, the "ZOMG-the-other-guy-and-his-evil-special-interests-are-going-to-destroy-the-country-and-I-don't-know-if-we'll-get-it-back" nonsense will shift over to the Democrats, and they'll make all sorts of insinuations about *his* religion, but I somehow suspect the Romney conspiracy theories just won't be as much fun.

Fluorescent Testicle: Bonus: He's technically almost correct, but that somebody was Darrell Issa (R-Traitor). "Look we have to make priorities and choices in this country. We have...15,000 contractors in Iraq. We have more than 6,000 contractors, a private army there, for President Obama, in Baghdad. And we're talking about can we get two dozen or so people into Libya to help protect our forces. When you're in tough economic times, you have to make difficult choices. You have to prioritize things."

This type of thing gets on my nerve and I hate how they grouped the obviously false stuff (time travel on Mars) with the true stuff (born in Kenya). Trying to make all of us who know the truth look silly with there stupid venn. U.S. patroits wont be deterred by this tactic though. You want conspiracies then look at libs who said that Trig Palin isnt really Sarahs son and she she didnt give birth in Juno. Thats false and its blood libel. Michelle Obama saying Whitey? True. She has a very mercurial temper and is racist and she said it in the heat of the moment. Obama is the anti christ? True. He is proof that true evul can prevail when evulution is taught in schools and gays allowed to flourish. He will bring 100 million muslins to the US? True. He wants to wipe Isreal from the map turning it into a flaming Jew pit. Earned his funds from a Saudi Prince? True. He recruited Dodi Fayed to kill Princess Diana for the Saudi Royal family and was rewarded with a fortune. And he caused the shooting in Aurora, the Neptunes ghostwrote his love raps, the list goes on and on.
People call Romney a plutocrat but Obama is the one trying to make the government rich off taxing the working man, and his cup runneth over. And if you took a look at the cup Id bet that youd find that it is all being given to the Muslin Brotherhood. By the way Romney is a genius who actually took his SAT earning his way into college. Obama faked his transcripts. Heres the bottom line: Obamas real father was Apollo Creed and he WAS married to his gay Pakistani roommate, Freddy Mercury. Little known fact, Mercury died of aids. Could Obama have been patient 0? Yes, because he is literally 0bama. Case closed.
So I'm in the car right now with my mother in law. She hates Obama with a passion. She swears up and down that if O is reelected, the economy will crash again. She believes that he is the reason that it crashed to begin with. When I mentioned that the crash happened before he was elected, she said that the stock market is "predictive" and it crashed because wall St new he was going to win. It's Obama-the-president's fault the housing market crashed and so many Americans are out of work. If McCain had won, the crash would have never have happened.
If O wins and things improve, try this: At six month intervals ask why her fears never came true. Chart the responses. After four years you will have an interesting report on the human psyche. Publish. Profit!
Liquid chromatographic determination of a non-steroidal oral contraceptive CDRI-85/287 in rat serum. A precise and sensitive high performance liquid chromatographic (HPLC) assay method was developed and validated for the quantitation of 2-[4-(2-piperidinoethoxy) phenyl]-3-phenyl-(2H)-1-benzo(b)pyran (compound CDRI-85/287) in rat serum. This method, applicable to 0.5 ml volumes of serum, was validated according to GLP guidelines. It involved double extraction of serum samples with a mixture of hexane and iso-propanol (98:2 v/v) at alkaline pH and the use of UV detection at 332 nm. Linearity, precision and accuracy were acceptable over the range 5-200 ng ml-1. The absolute recovery was more than 75% and the lower limit of quantitation was 5 ng ml-1. Freeze-thaw stability studies up to four cycles showed no apparent differences in the calculated spiked concentrations. In-process stability evaluation showed that the processed samples remained stable for up to 85 h.
Such has been the fanfare about the controversial Carroll, it seems that just about every Premier League defence has struggled to contain the long haired Toon Army idol. That was until Newcastle hosted Fulham on Saturday. Back on his old stomping ground, Hughes excelled in the scoreless draw, dealing superbly with the threat of Carroll. I couldn't believe it when Aaron actually received some credit for his performance. Normally he is ignored and undervalued by pundits and the English scribes, while other less deserving souls grab the limelight. The 31-year-old from Cookstown wouldn't have it any other way. The Northern Ireland captain is modest to a fault, preferring to praise others for their contribution to the cause, even when his own is the most telling, as was the case against Carroll at the weekend. You see, Aaron's not just a class act on the field, he's exactly the same off it. He will be embarrassed by this but once in a poll conducted by yours truly, while travelling with the Northern Ireland team, a group of journalists voted Hughes the nicest guy in football. It was a landslide. Down the years many of football's biggest names have selected Aaron too. He's now played almost 500 club games and close to 400 in the Premier League, plus 74 caps have been won. Not bad for a guy who is barely mentioned in most match reports. He is appreciated though by team-mates, who would tell you that his composure and selfless nature in the heat of battle gives them confidence. Fulham's Brede Hangeland loves playing alongside Aaron at club level. Ditto Stephen Craigan on the international front. Jonny Evans will enjoy the experience in central defence tonight against Morocco at Windsor Park. Players like him. So too most managers he has played under. When Kenny Dalglish was in Belfast recently, he spoke eloquently about his admiration of how his former player at Newcastle has shone at top flight level for so long. Ruud Gullit, David O'Leary, Roy Hodgson and current Fulham boss Mark Hughes would be equally respectful of the former Newcastle and Aston Villa ace. Had the great Sir Bobby Robson still been alive, he would pay a handsome tribute while his international bosses Lawrie McMenemy, Sammy McIlroy, Lawrie Sanchez and now Nigel Worthington class the defender as the model pro. Since making his first team debut as a 17-year-old at the Nou Camp in 1997 for Newcastle against Barcelona, only two managers have failed to realise the value of Hughes. Graeme Souness broke Aaron's heart in 2005 when he told him his Newcastle days were over. Souness decided that having signed Jean-Alain Boumsong for £8.5 million the consistency of Hughes was surplus to requirements. A shocking decision which still brings Geordies out in a cold sweat. Intriguingly the other boss who never truly realised what Hughes offers a team was his countryman Martin O'Neill, when he took over at Aston Villa. In contrast Fulham boss Mark Hughes fully appreciates the player. He says: “Aaron has been outstanding since I became Fulham manager. He doesn't get headlines, but everyone here knows how vital he is to the team.” All this begs the question, just how good is Aaron? Well, right now it is difficult to name a more consistent British defender. Manchester United's Rio Ferdinand and Chelsea's John Terry are the obvious choices, but while Hughes is getting better with age, the powers of England's first choice defenders are dwindling. Pity they couldn't say the same about the number of injuries they suffer these days. 
Tottenham's Ledley King is a quality performer, but Royal weddings come around more often than his appearances. Look at Manchester City and all the publicity generated by Joleon Lescott's outrageous £22 million move from Everton, yet he doesn't get anywhere near the team. Hughes has been an ever present for Fulham this season. And thankfully, while others have pulled out quicker than Gillian McKeith in a Bushtucker trial on I'm a Celebrity... Aaron will be at Windsor tonight leading Northern Ireland.
Q: How do you calculate if 2 lines are facing toward or away?

Given the 4 points describing 2 line segments, how do you calculate if line A is facing towards or away from line B? The 2 lines have a fixed length, and can be measured as the distance from x1/y1 to x2/y2.

A: Let A and B be two points on the black line. Let C and D be your blue segment. The sign of the z coordinate of the cross product AB^AC tells you whether C is "left" or "right" of the black line. Similarly, the cross product AB^CD tells you whether CD steers "left" or "right" of the black line. We don't really need to know which side is which; all that matters is whether the two signs agree or disagree, which is why we multiply the two values. The following pseudocode should therefore work:

z1 = (xB-xA)*(yC-yA) - (yB-yA)*(xC-xA);
z2 = (xB-xA)*(yD-yC) - (yB-yA)*(xD-xC);
z3 = z1 * z2;
if (z3 < 0)
    ; /* Pointing towards (BUT maybe even crossing) */
else if (z3 > 0 || z2 != 0)
    ; /* Pointing away */
else
    ; /* Parallel */

I am afraid I need some time to write a proper solution for the Bezier curve. Is the following situation towards or away?
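For concreteness, here is a small runnable Python version of the same sign test (an illustrative sketch only; the helper names and the example points are mine, and the points follow the answer's A, B, C, D convention):

def cross_z(ax, ay, bx, by, cx, cy, dx, dy):
    # z component of the 2D cross product of the vectors AB and CD.
    return (bx - ax) * (dy - cy) - (by - ay) * (dx - cx)

def facing(a, b, c, d):
    # Classify segment CD relative to the line through A and B, following the
    # sign test above: 'towards' (possibly crossing), 'away', or 'parallel'.
    z1 = cross_z(*a, *b, *a, *c)   # which side of line AB the point C lies on
    z2 = cross_z(*a, *b, *c, *d)   # which way the direction of CD turns relative to AB
    z3 = z1 * z2
    if z3 < 0:
        return "towards"
    if z3 > 0 or z2 != 0:
        return "away"
    return "parallel"

# Example: AB is the x-axis, C sits above it, and CD heads back down toward it.
print(facing((0, 0), (1, 0), (0, 2), (1, 1)))   # -> towards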
Q: PHP pass-by-reference logic explanation

Can anyone explain the output of the following PHP script?

$a = array ('zero','one','two');
foreach ($a as &$v) { }
foreach ($a as $v) { }
print_r($a);

Output:

Array
(
    [0] => zero
    [1] => one
    [2] => one
)

A: After a foreach by reference, $v remains bound to the last array element, so any later assignment to $v writes into that element.

<?php
$a = array ('zero','one','two');

foreach ($a as &$v) { }
// After this loop, $v is still a reference to the last item of $a, i.e. $a[2].
// If you unset($v) here, the next loop will leave $a unchanged.

foreach ($a as $v) {
    // Each iteration assigns the current value to $v, and because $v is a
    // reference to $a[2], every assignment lands in $a[2]: first 'zero',
    // then 'one', then $a[2]'s own (already overwritten) value 'one'.
}

print_r($a);
// Array
// (
//     [0] => zero
//     [1] => one
//     [2] => one
// )
2012 free agents by team The following list was provided by the NFLPA. It includes the players who became free agents on March 13. Players are listed alphabetically by team, with their position and the number of years they have accrued in the NFL.
--- author: - 'Mats G. Löfdahl' date: Accepted 13 September 2010 title: 'Evaluation of image-shift measurement algorithms for solar Shack–Hartmann wavefront sensors' --- [Solar Shack–Hartmann wavefront sensors measure differential wavefront tilts as the relative shift between images from different subapertures. There are several methods in use for measuring these shifts.]{} [We evaluate the inherent accuracy of the methods and the effects of various sources of error, such as noise, bias mismatch, and blurring. We investigate whether Z-tilts or G-tilts are measured.]{} [We test the algorithms on two kinds of artificial data sets, one corresponding to images with known shifts and one corresponding to seeing with different $r_0$.]{} [Our results show that the best methods for shift measurements are based on the square difference function and the absolute difference function squared, with subpixel accuracy accomplished by use of two-dimensional quadratic interpolation. These methods measure Z-tilts rather than G-tilts.]{} Introduction {#sec:introduction} ============ High-resolution observational astronomy with telescopes operated on the ground relies on methods for combating the effects from turbulence in the Earth’s atmosphere. Turbulence varies with time scales on the order of ms, mixing air that varies in temperature and therefore in refractive index. The wavefronts of the light from astronomical objects, which are flat when they enter the atmosphere, are distorted by the optically active atmosphere, which causes image motion and blurring in the images collected through the telescope. Measurements of the wavefronts allow us to correct for these effects, both in real time and in post processing. There are several different methods for measuring in real time the variation of the wavefront phase of the light over the telescope pupil. For solar telescopes, the one exclusively used is the Shack–Hartmann [SH; @shack71production] wavefront sensor (WFS). This is true for the adaptive optics (AO) systems of present 1-meter class, high-resolution solar telescopes [@1998SPIE.3353...72R; @scharmer00workstation; @scharmer03adaptive; @2003SPIE.4853..187V; @2004SPIE.5171..179R]. SH wide-field WFSs are used for characterizing the atmospheric turbulence above existing and future solar telescopes [@waldmann07untersuchung; @2008SPIE.7015E.154W; @scharmer10s-dimm+]. An SH WFS estimates the wavefront from measurements of the local wavefront tilt in several regions of the pupil called subpupils. The local tilts manifest themselves as image motion in the focal plane associated with each subpupil. For night-time telescopes, this image motion is (comparably) easily measured by tracking the peak of the image of a natural or artificial point source. As no point sources are available on the sun, one instead has to resort to measuring relative image motion by matching solar features in the different subpupil images. This can be done by finding the minimum of the degree of mismatch of the images as a function of image shift. This procedure requires solving two distinct subproblems: 1) computing the mismatch function as a function of image shift in steps of whole pixels and 2) finding the minimum of the mismatch function with subpixel precision. The latter involves interpolation between the whole-pixel grid points. This procedure for solar correlation tracking was apparently pioneered by @1977lock.reptR....S and . 
The SH WFSs developed for the AO systems currently in use at different solar telescopes use a variety of shift measurement methods, so do the sensors used for turbulence characterization. This suggests that the choices of methods were based on personal preferences and technological momentum from other applications rather than on a thorough evaluation of the performance of these methods on relevant data. Several algorithms for measuring the mismatch and performing the interpolation were investigated by @yi92software. However, because the purpose of their work was different from ours (image remapping in post-processing), they used much larger fields of view (FOVs) than is currently practical for real-time solar SH WFS. As part of a project on characterization of the atmospheric turbulence using SH WFS, @waldmann07untersuchung and @2008SPIE.7015E.154W address these problems for setups more similar to ours. The recent MSc thesis of @johansson10cross-correlation has expanded on Waldmann’s work in this area. She implemented a number of different methods for image-shift measurements and tested them on synthetic data relevant to solar SH WFSs. Comparisons with their methods and results will appear throughout this paper. ------------------------------------------------------------------------------------------------------------------------------------------------------------ Acronym Name Mismatch, $c_{i,j}$, for shift $i,j$ --------- --------------------------------------- ---------------------------------------------------------------------------------------------------------- SDF Square Difference Function $ \displaystyle \sum_{x,y} \bigl( g(x,y) - g_\text{ref}(x+i,y+j) \bigr)^2$ CFI Covariance Function, image domain $\displaystyle -\sum_{x,y} \protect\overbracket[.5pt][1pt]{g}(x,y)\cdot \protect\overbracket[.5pt][1pt]{g_\text{ref}}(x+i,y+j)$ CFF Covariance Function, Fourier domain $\displaystyle -\mathfrak{F}^{-1}\bigl\{ \mathfrak{F}\{ w_2\cdot\protect\overbracket[.5pt][1pt]{g}(x,y) \} \cdot \mathfrak{F}^*\{ w_2\cdot\protect\overbracket[.5pt][1pt]{g_\text{ref}}(x,y) \} \bigr\}(i,j)$ ADF Absolute Difference Function $\displaystyle\sum_{x,y} \bigl| g(x,y) - g_\text{ref}(x+i,y+j) \bigr|$ ADF$^2$ Absolute Difference Function, Squared $\displaystyle\biggl( \sum_{x,y} \bigl| g(x,y) - g_\text{ref}(x+i,y+j) \bigr| \biggr)^2$ ------------------------------------------------------------------------------------------------------------------------------------------------------------ In this paper we investigate, by use of artificial data relevant to present solar SH WFSs, a number of different algorithms for measuring whole-pixel image shifts and interpolating to get subpixel accuracy. The algorithms are defined in Sect. \[sec:algorithms\]. We investigate the inherent accuracy of the algorithms in Sect. \[sec:algorithm-accuracy\], by testing them on identical images with known shifts, as well as the influence of noise and variations in intensity level. In Sect. \[sec:image-shift-versus\] we use the best methods on images formed through artificial seeing and evaluate the performance for different seeing conditions. We discuss our results in Sect. \[sec:conclusions\]. Algorithms {#sec:algorithms} ========== Correlation algorithms {#sec:corr-algor} ---------------------- In Table \[tab:shift\_algorithms\] we define five different correlation algorithms (CAs), which we use to calculate the image mismatch on a grid of integer pixel shifts $i,j$. The names and acronyms of the CAs are also given in the table. 
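For concreteness, the SDF and ADF$^2$ mismatch of Table \[tab:shift\_algorithms\] evaluated on a grid of integer shifts can be sketched as follows. This is a minimal NumPy illustration, not the code used in any of the systems discussed; it assumes an oversized reference image so that a shifted window can be cut out directly, and the array and function names are illustrative only.

```python
import numpy as np

def mismatch_matrices(g, g_ref, smax):
    """SDF and ADF^2 mismatch for integer shifts -smax..+smax.

    g     : (N, N) subimage.
    g_ref : (N + 2*smax, N + 2*smax) oversized reference subimage, so shifting
            by (i, j) just selects a different N x N window.
    Returns (c_sdf, c_adf2), each of shape (2*smax+1, 2*smax+1).
    """
    N = g.shape[0]
    n = 2 * smax + 1
    c_sdf = np.empty((n, n))
    c_adf2 = np.empty((n, n))
    for i in range(n):
        for j in range(n):
            ref = g_ref[i:i + N, j:j + N]       # reference window for shift (i-smax, j-smax)
            diff = g - ref
            c_sdf[i, j] = np.sum(diff ** 2)     # SDF
            c_adf2[i, j] = np.sum(np.abs(diff)) ** 2  # ADF^2: square after summing
    return c_sdf, c_adf2

def whole_pixel_minimum(c, smax):
    """Grid position of the minimum, expressed as a shift in whole pixels."""
    i_min, j_min = np.unravel_index(np.argmin(c), c.shape)
    return i_min - smax, j_min - smax
```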
These mismatch values make a matrix $\mathbf c$ with elements $c_{i,j}$. A coarse estimate of the image shift, $(\delta x,\delta y)$, is then given by the indices corresponding to the grid position with the minimum mismatch value, $(i_\text{min},j_\text{min})$. This shift should be sought within a maximum range in order to reduce the number of false matches to other parts of the granulation pattern. The algorithms in Sect. \[sec:subpixel-interp\] are then used to refine this estimate to subpixel accuracy. Perhaps most straight-forward is the SDF algorithm, in which one calculates the mismatch in a Least Squares (LS) sense. Subtracting the intensity mean, expanding the square in the SDF algorithm, and retaining only the cross term gives twice the covariance (with negative sign), which is the basis of the following two algorithms. The CFI algorithm calculates the covariance in the image domain. It is the one being used for the Dunn Solar Telescope system [@1998SPIE.3353...72R]. The correlation coefficient differs from the covariance only by division with the standard deviations of the two images, so methods based on the former [e.g., @2008SPIE.7015E.154W] should give results similar to those of CFI. The covariance can also be calculated in the Fourier domain, taking advantage of the Fast Fourier Transform (FFT). The CFF algorithm was developed by for an image stabilization system and is used today in the KAOS AO implementation used at the Vacuum Tower Telescope [@2003SPIE.4853..187V]. For small images, such as those involved in SH WFS calculations, one can expect errors from wrap-around effects. That is, because of the assumption of periodicity implicit in finite-size Fourier transforms, for large shifts, structures in one image are not matched with structures at the shifted location but with structures shifted in from the opposite side of the image. @knutsson05extended derived a method based on the Fourier spectrum of the correlation function (correlation spectrum phase). It is supposed to give results similar to the CFF method but with some accuracy sacrificed for speed. We have not investigated this method. The CFF method requires apodization of the images, i.e., the multiplication of a window function that reduces ringing effects from the discontinuities caused by the Fourier wrap-around. When not explicitly stated otherwise, we use a Hamming window written as $$w_2(x,y) = w_1(x)\cdot w_1(y),$$ where $w_1$ is the 1-dimensional Hamming window [@enochson68programming], $$w_1(x) = a+(a-1)\cos \Bigl(\frac{2\pi x}{N-1}\Bigr),$$ where $a=0.53836$ and $N$ is the linear size of the window. See also Sect. \[sec:window-function\] below. The ADF algorithm is fast because it can be calculated very efficiently in CPU instructions available for many architectures, particularly for 8-bit data. @2003SPIE.4853..351K use ADF for the IR AO system of the McMath–Pierce solar telescope. So do @2009OptRv..16..558M in their recently presented AO system used for the domeless solar telescope at Hida Observatory. Because the shape of the ADF minimum does not match the assumption of a parabolic shape implicit in the subpixel algorithms, squaring the ADF correlation values leads to an improvement. This adds a completely negligible computational cost to that of ADF, as squaring is done after summing. In fact, because it does not move $(i_\text{min},j_\text{min})$, only the at most nine grid points used for subpixel interpolation (see Sect. \[sec:subpixel-interp\]) have to be squared. 
This ADF$^2$ method was developed by G. Scharmer in 1993 for use in the correlation tracker systems of the former Swedish Vacuum Solar Telescope [@shand95latency] and is in use in the AO and tracker systems of the Swedish 1-meter Solar Telescope [SST; @scharmer00workstation; @scharmer03adaptive]. @1977lock.reptR....S showed that a linear trend in intensity shifts the covariance peak from the correct position. Therefore one should subtract a fitted plane from both $g$ and $g_\text{ref}$ before applying the CAs. However, the granulation data used in our simulations have negligible trends and for the difference based algorithms (ADF, ADF$^2$, and SDF), a consistent bias in the intensity level cancels automatically. We therefore saved computing time in our tests by limiting the pre-processing of the data to only subtracting the mean values, and only when calculating CFI and CFF. ------------------------------------------------------------------------------------------------------------------------------------------------------ Acronym Name Location of minimum --------- ---------------------------- --------------------------------------------------------------------------------------------------------------- 2LS 2D Least Squares $(x_2,y_2)$where $ \begin{cases} a_2 = \bigl(\langle s_{1,j}\rangle_j - \langle s_{-1,j}\rangle_j\bigr)/2 \\ a_3 = \bigl(\langle s_{1,j}\rangle_j - 2\langle s_{0,j}\rangle_j + \langle s_{-1,j}\rangle_j\bigr)/2 \\ a_4 = \bigl(\langle s_{i,1}\rangle_i - \langle s_{i,-1}\rangle_i\bigr)/2 \\ a_5 = \bigl(\langle s_{i,1}\rangle_i - 2\langle s_{i,0}\rangle_i + \langle s_{i,-1}\rangle_i\bigr)/2\\ a_6 = (s_{1,1}-s_{-1,1}-s_{1,-1}+s_{-1,-1})/4 \end{cases} $ 2QI 2D Quadratic Interpolation $(x_2,y_2)$where $ \begin{cases} a_2 = (s_{1,0}-s_{-1,0})/2\\ a_3 = (s_{1,0}-2s_{0,0}+s_{-1,0})/2\\ a_4 = (s_{0,1}-s_{0,-1})/2\\ a_5 = (s_{0,1}-2s_{0,0}+s_{0,-1})/2\\ a_6 = (s_{1,1}-s_{-1,1}-s_{1,-1}+s_{-1,-1})/4 \end{cases} $ 1LS 1D Least Squares $(x_1,y_1)$using$a_2, a_3, a_4, a_5$ from 2LS above 1QI 1D Quadratic Interpolation $(x_1,y_1)$using$a_2, a_3, a_4, a_5$ from 2QI above ------------------------------------------------------------------------------------------------------------------------------------------------------ Subpixel interpolation {#sec:subpixel-interp} ---------------------- The methods in Sect. \[sec:corr-algor\] are responsible for making a coarsely sampled 2D correlation function with a reasonable shape. The interpolation algorithms (IAs) in this section then have to find the minimum with better accuracy than given by the sampling grid. @1988ApJ...333..427N found that interpolation methods should be based on polynomials of degree 2. Methods based on polynomials of higher degree systematically underestimate the shift for small displacements, while first degree polynomials give a systematic overestimation. The algorithms we consider can all be described as fitting a conic section, $$f(x,y) = a_1 + a_2 x + a_3 x^2 + a_4 y + a_5 y^2 + a_6 xy,$$ to the 3$\times$3-element submatrix $\mathbf s$ of $\mathbf c$ centered on the sample minimum of $\mathbf c$ with elements $$\label{eq:1} s_{i,j}=c_{i+i_\text{min},j+j_\text{min}}; \qquad i,j=-1,0,1.$$ The interpolated shift vector, $(\delta x,\delta y)$, is the position of the minimum of the fitted function, $(x_\text{min},y_\text{min})$. The algorithms differ in the number of points used and whether the fitting is done in 2D or in each dimension separately. 
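As a concrete example of the subpixel refinement step, the 2QI fit can be sketched as below. The conic coefficients follow the 2QI entries of Table \[tab:subpix\_algorithms\]; the closed-form minimum location is obtained by setting both partial derivatives of the fitted conic to zero and is written out here because the table as reproduced above lists only the coefficients. This is an illustrative sketch, not any of the cited implementations.

```python
import numpy as np

def subpixel_2qi(c, i_min, j_min):
    """Refine the whole-pixel minimum of mismatch matrix c with 2QI.

    c            : 2D mismatch matrix (e.g. SDF values).
    i_min, j_min : indices of the sample minimum (assumed not on the border).
    Returns the interpolated minimum position relative to (i_min, j_min).
    """
    s = c[i_min - 1:i_min + 2, j_min - 1:j_min + 2]  # 3x3 stencil; s[1, 1] is the minimum
    # 2QI conic coefficients, with s[1+i, 1+j] playing the role of s_{i,j}.
    a2 = (s[2, 1] - s[0, 1]) / 2
    a3 = (s[2, 1] - 2 * s[1, 1] + s[0, 1]) / 2
    a4 = (s[1, 2] - s[1, 0]) / 2
    a5 = (s[1, 2] - 2 * s[1, 1] + s[1, 0]) / 2
    a6 = (s[2, 2] - s[0, 2] - s[2, 0] + s[0, 0]) / 4
    # Stationary point of a1 + a2*x + a3*x^2 + a4*y + a5*y^2 + a6*x*y,
    # from setting both partial derivatives to zero.
    det = 4 * a3 * a5 - a6 ** 2
    x = (a6 * a4 - 2 * a5 * a2) / det
    y = (a6 * a2 - 2 * a3 * a4) / det
    return x, y
```

With $a_6=0$ this reduces to the separable 1D result $x=-a_2/(2a_3)$, $y=-a_4/(2a_5)$ used by the one-dimensional IAs.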
The definitions, names, and acronyms of the different methods are given in Table \[tab:subpix\_algorithms\]. The 1QI method is based on numerical 1D derivatives using the center row (column) of $\mathbf s$ [@1986ApOpt..25..392N]. It is equivalent to a LS estimate using only the center row (column) of $\mathbf s$. It does not use the corner elements of $\mathbf s$. The 1LS algorithm consists of, separately for the $x$ and $y$ directions, fitting a 1D polynomial of degree 2 to all the elements in $\mathbf s$. This is equivalent to the procedure of @waldmann07untersuchung, projection of the data onto the axes (i.e., summing the rows (columns)), and doing LS fitting on the result. Waldmann uses Lagrange interpolation but this is mathematically equivalent to a LS fit. The 2QI algorithm was derived by @yi92software [their Eq. (9)] as an extension of the 1QI algorithm. The 2LS algorithm is based on expressions for the conic section coefficients derived by @waldmann07untersuchung. @johansson10cross-correlation found that Waldmann’s expression for one of the 2LS coefficients is missing a factor 2. We are using the corrected expression. @waldmann07untersuchung compared four different subpixel methods: 1LS, 2LS, and a 2D fit to a Gaussian. He found that the Gaussian fit gave the best results and the polynomial method worked almost as well. Because the Gaussian fit is more computationally heavy, he adopted the latter. @johansson10cross-correlation tested the Gaussian fit and got results comparable to Waldmann’s but much better results for the polynomial fits. Based on Johansson’s results, we have not evaluated Gaussian fits as a method for subpixel interpolation. @2009OptRv..16..558M use a method for subpixel interpolation, where they find the centroid of a spot generated by inverting and then clipping the mismatch measured with ADF. This method is not considered here. Algorithm accuracy {#sec:algorithm-accuracy} ================== The goal of this experiment with artificial data is to establish the accuracy of the algorithms themselves, for granulation images (GI), which are identical except for a known shift and detector imperfections such as noise and bias. In reality, the images will be different because the atmospheric turbulence not only shifts the images but also smears them differently. Those effects will be addressed in Sect. \[sec:image-shift-versus\] below. Artificial data {#sec:perfect-data-recipe} --------------- For this experiment, the images should be identical except for a known shift, defined to subpixel precision without introducing errors that are caused by the re-sampling needed for subpixel image shifting. We therefore start with a high-resolution image, degrade the resolution, shift it by a known number of whole high-resolution pixels, and then down-sample it to the SH image scale. Specifically, we use a 2000$\times$2000-pixel, high-quality SST G-band image with an image scale of 0041/pixel, see Fig. \[fig:GI\]. The image was recorded on 25 May 2003 by Mats Carlsson et al. from ITA in Oslo and corrected for atmospheric turbulence effects by use of Multi Frame Blind Deconvolution [@lofdahl02multi-frame]. We degraded it to 9.8 cm hexagonal (edge to edge) subpupil resolution at 500 nm. This degraded image was shifted by integer steps from 0 to 20 times the high-resolution pixels, as well as in steps of 10 pixels from 30 to 60. The so degraded and shifted GIs were box-car compressed by a factor 10 to 200$\times$200 pixels of size 041. 
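The box-car compression used in this recipe is plain block averaging. A minimal sketch, assuming a NumPy image whose side is divisible by the compression factor (names are illustrative):

```python
import numpy as np

def boxcar_compress(img, factor):
    """Block-average img by an integer factor, e.g. 2000x2000 -> 200x200 for factor 10."""
    ny, nx = img.shape
    assert ny % factor == 0 and nx % factor == 0
    return img.reshape(ny // factor, factor, nx // factor, factor).mean(axis=(1, 3))

# A shift of 7 high-resolution pixels applied before compression by 10 corresponds
# to a known 0.7-pixel shift in the compressed image, with no interpolation involved.
```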
This procedure gives images with known subpixel shifts, $\delta x$ and $\delta y$, without any re-sampling, except for the compression. Figure \[fig:degraded\] shows a sample compressed image. The data with 0–20 high-resolution pixel shifts were made to test subpixel accuracy, while the 30–60-pixel shifts are for testing linearity with larger shifts. The diffraction limited resolution, $\lambda/D_\text{sub}\approx1\arcsec$ at 500 nm, corresponds to $>2$ pixels. This means subpixel accuracy corresponds to super-resolution accuracy. The resulting images had more contrast than the real data from our SH WFS, so some bias was added to change the RMS contrast to $\sim$3% of the mean intensity. The resulting images were stored in two versions, with and without Gaussian noise with a standard deviation of 0.5%[^1]. The digitization noise of a 12-bit camera is insignificant compared to the Gaussian noise but may be significant for an 8-bit camera. We do not include the effects of digitization in our simulations. Processing {#sec:perfect-processing} ---------- The 200$\times$200-pixel FOV is much larger than the FOV of the SH WFS, which allows the use of many different subfields in order to get better statistics. Centered on each of 17$\times$17 grid positions, subimages, $g$, of size 16$\times$16 or 24$\times$24 pixels, were defined. The subimages defined in the unshifted reference image, $g_\text{ref}$, were larger in order to accommodate a shift range limited to $\pm8$ pixels along each axis direction, except for CFF, which uses two images of equal size. Note also that for CFF, the size of the correlation matrix is limited by the subimage size. For 16$\times$16-pixel subfields, this limits the range to $\pm6$ pixels (in reality to even less). The different sizes of $g$, 16$\times$16 and 24$\times$24 pixels, have two purposes: 1) We want to see how a change in size affects some of the methods and 2) we will compare CFF using 24$\times$24 pixels with the other methods using 16$\times$16 pixels. If the image geometry on the detector accommodates a 24$\times$24-pixel $g_\text{ref}$, then it can also accommodate 24$\times$24-pixel $g$ subfields if no oversize reference image is needed. We measured the shifts with each combination of CAs in Sect. \[sec:corr-algor\] and IAs in Sect. \[sec:subpixel-interp\]. We do this with and without noise and with and without multiplying the reference image by 1.01, giving an approximate 1% bias mismatch. The bias mismatch sensitivity is investigated because it is known to be a problem with the ADF and ADF$^2$ methods. ![image](15331fg2a){width="\figwidth"} ![image](15331fg2b){width="\figwidth"}\ ![image](15331fg2c){width="\figwidth"} ![image](15331fg2d){width="\figwidth"} For each real shift, $\delta x_\text{real}$ (and $\delta y_\text{real}$, we will simplify the notation by referring to both axis directions with $x$ when possible), we calculate the mean and standard deviation, $\sigma$, of the measured shifts, $\delta x_\text{measured}$, by use of the outlier-robust statistics based on Tukey’s Biweight as implemented in the IDL Astronomy User’s Library [@1993ASPC...52..246L]. After removing $4\sigma$ outliers, we fit the data to the relationship $$\label{eq:linesineone} \delta x_\text{measured} = p_0 + p_1\delta x_\text{real} + p_2\sin(a\, \delta x_\text{real}),$$ where $a=2\pi/1$ pixel, by use of the robust nonlinear LS curve fitting procedure MPFIT [@more78levenberg; @2009ASPC..411..251M]. The linear coefficient, $p_1$, of these fits is listed in the tables below. 
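A rough sketch of the fit to Eq. (\[eq:linesineone\]), using `scipy.optimize.curve_fit` rather than the robust biweight/MPFIT machinery used here; the $4\sigma$ outlier rejection is reduced to a single crude clip for illustration, and the names are not from the original code:

```python
import numpy as np
from scipy.optimize import curve_fit

def line_plus_sine(dx_real, p0, p1, p2):
    """Eq. (linesineone): linear response plus a 1-pixel-period undulation."""
    return p0 + p1 * dx_real + p2 * np.sin(2 * np.pi * dx_real)

def fit_response(dx_real, dx_measured):
    """Fit p0, p1, p2 after a crude one-pass 4-sigma cut (illustration only)."""
    err = dx_measured - dx_real
    keep = np.abs(err - np.median(err)) < 4 * np.std(err)
    popt, _ = curve_fit(line_plus_sine, dx_real[keep], dx_measured[keep],
                        p0=[0.0, 1.0, 0.0])
    return popt  # p0 (offset), p1 (slope), p2 (undulation amplitude)
```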
Results {#sec:perfect-results} ------- In Fig. \[fig:perfect-scatter-four-methods\], the SDF, ADF$^2$, CFI, and CFF CAs are compared for the case of no noise and no bias mismatch, using the 2QI IA and 24$\times$24-pixel subfields. The errors are a mix of systematic and random errors. The SDF and ADF$^2$ algorithms give a tighter correlation between the true and measured image shifts than the CFI and CFF algorithms. @1988ApJ...333..427N found similar undulating effect of small errors near whole and half pixel shifts calculated by CFF and larger errors in between. @ballesteros96two-dimensional make a similar observation with ADF. The ADF and CFF methods produce many outliers, which is the main reason we need the robust statistics mentioned in Sect. \[sec:perfect-processing\]. A *very* small fraction of these outliers, on the order $10^{-5}$ of all cases for CFF, zero for all other CAs, are caused by the correlation matrix minimum falling on an outer row or column. The rest are caused by false minima in the correlation matrix, which happen to be deeper than the real minimum. This can happen occasionally if a secondary minimum is located on or near a grid point, while the real minimum is between grid points. The effect is more severe for ADF because of the more pointy shape of its minimum. For CFF, minima corresponding to large shifts are attenuated by two effects: apodization lowers the intensity away from the center of the subimage and the digital Fourier transform wraps in mismatching structure from the other side of the subimage. This can lead to detection of false minima corresponding to small shifts. The dashed line in the CFF panel represents the linear part of the fit to Eq. (\[eq:linesineone\]); its slope is $p_1-1$. The CFF method systematically underestimates large shifts. Comparison between the fitted line and the mean values shows a slight nonlinearity in the CFF method, making the underestimation worse for larger shifts. The deviation of $p_1$ from unity and the undulations are systematic errors. The former can be calibrated while the latter mix with the random errors represented by the error bars. ![image](15331fg3a){width="\figwidth"} ![image](15331fg3b){width="\figwidth"}\ ![image](15331fg3c){width="\figwidth"} ![image](15331fg3d){width="\figwidth"} Figure \[fig:perfect-scatter-SDF-different-versions\] illustrates the effects of noise and of bias mismatch for SDF. Here we limit the plots to $\delta r=(\delta x_\text{real}^2+\delta y_\text{real}^2)^{1/2}\le 2$ pixels. The blue line corresponds to the fit of Eq. (\[eq:linesineone\]). The amplitudes of the undulations, $p_2$, is $\sim$0.01 pixel. Noise does not appear to change the undulating errors significantly but it does make the random errors more dominant. Bias mismatch has a similar effect and overwhelms the differences due to noise. It also appears to change a minute systematic overestimation of the shifts to an equally small underestimation. ### Tables {#sec:tables} The complete results of these simulations are presented in Tables \[tab:sim-n0b0\]–\[tab:sim-n5b0-flat\]. While the standard deviations in the plots are for each real shift individually, and therefore do not include the undulations, the standard deviations in the tables are for intervals of real shifts and therefore do include the undulations. The measured shifts might be expected to have near-Gaussian distributions, which is true for ADF$^2$ and SDF and mostly for CFI. ADF often has complicated, multi-peak distributions. 
All CFF distributions are double-peaked and/or asymmetrical. We give the results for all shifts, $\delta r$, but also separately for small shifts ($<1$ pixels and $<2$ pixels, resp.), medium shifts (shifts between 3 and 5 pixels in length), and large shifts ($>5$ pixels). The small shifts are relevant to AO performance in closed loop. The large shift results tell us something about performance in open loop, which is relevant for wavefront sensor calibration, site testing, image restoration, and when trying to close an AO loop. ### Identical images {#sec:perfect-data} In Table \[tab:sim-n0b0\], we show the performance of the different methods with noise free data. We begin by noting that in many cases the errors for large shifts are smaller than those for small shifts. This may seem counterintuitive but it is simply a consequence of using only whole pixel shifts for the larger shifts. The IAs have smaller systematic errors on the grid points, see the undulations in Fig. \[fig:perfect-scatter-four-methods\]. The ADF CA produces many $4\sigma$ outliers. It is not surprising that it gives much worse results than ADF$^2$, because the two CAs by definition share the location of the whole pixel minimum but the ADF minimum does not have the parabolic shape assumed by the IAs. SDF and ADF$^2$ are clearly better than the CFI and CFF methods. CFF can compete for the very smallest shifts ($<1$ pixel) if we use 24$\times$24 pixels for CFF and 16$\times$16 pixels for the other CAs (which is the most fair comparison, since the optics need to accommodate 24$\times$24 pixels in order to get the oversize reference image for the non-CFF CAs). Except for CFF, the errors appear more or less independent of the magnitude of the shifts. CFF deteriorates significantly in three ways at larger shifts: The number of outliers increases, the random error increases, and the shifts are systematically underestimated, as shown by the nonunity slopes. The former two effects can be explained by the assumption of periodicity of the digital Fourier transform. The signal for large shifts is diluted by mismatching granulation shifted in from the opposite end of the FOV. For SDF and ADF$^2$, the 2D IAs are clearly better than the 1D ones. 2QI is marginally better than 2LS. The best results with SDF and ADF$^2$ are an error RMS of less than 0.02 pixels, corresponding to 0008. Increasing the subfield size from 16 to 24 pixels squared reduces the error by approximately 30%. ### Noisy images {#sec:noisy-data} Adding 0.5% noise to the images give the results shown in Table \[tab:sim-n5b0\]. The errors grow but the behavior is similar to the zero-noise case. The best results with SDF and ADF$^2$ 16$\times$16 are an error RMS of less than 0.03 pixels, corresponding to 0012. CFF with 24$\times$24 gives similar results for small shifts. As in the case of zero noise, increasing the subfield size from 16 to 24 pixels squared reduces the error by approximately 30%. With 24$\times$24 pixels, SDF and ADF$^2$ give results similar to the 16$\times$16-pixel zero-noise case. Surprisingly, the number of outliers for ADF is significantly reduced by the added noise. But $\sigma$ increases more than for ADF, so this may be an effect of making the error distribution more Gaussian. ### Bias mismatch {#sec:bias-mismatch} Subtracting a fitted plane (or just the mean intensity as in our experiment, see Sect. \[sec:corr-algor\]) from each subimage removes mismatches in intensity bias for CFI and CFF. 
We did not do this for the difference based CAs (SDF, ADF, and ADF$^2$), where a consistent bias cancels. However, if there is a bias mismatch between the images this cancellation is not effective. Such bias mismatch could come from, e.g., variations in a thin cloud layer in the case when $g_\text{ref}$ is not from the same exposure as $g$, or small drifts in the pupil location on the SH, causing variations in the light level from the outermost subpupils. We re-processed all the data after multiplying the reference image by 1.01, introducing a 1% bias mismatch with the other images. In Tables \[tab:sim-n0b1\] and \[tab:sim-n5b1\] we show the results for the difference based CAs. As expected, the CFI and CFF results did not change from the ones in Tables \[tab:sim-n0b0\] and \[tab:sim-n5b0\] and they are therefore not repeated. For small shifts, the SDF and ADF$^2$ results are now worse than for CFF, even when comparing the methods using the same image size. SDF is more robust against bias mismatch than ADF$^2$. There is now no real difference between ADF and ADF$^2$, possibly indicating that the parabolic shape of the ADF$^2$ correlation function is destroyed by the bias mismatch. The SDF error RMS is 0.7 pixels for 16$\times$16 and 0.4 pixels for 24$\times$24, which makes it the best method for large shifts. With a bias mismatch, 0.5% noise added to the images does not significantly change the results for the difference based CAs. We conclude that bias mismatch should be removed in pre-processing by subtraction of the intensity mean. ### Window function {#sec:window-function} The CFF method requires apodization, i.e., the multiplication of the subimage (after subtracting the fitted plane) by a window function. The intention is to reduce ringing effects from the discontinuities caused by Fourier wrap-around. For the results above, we used a 2D Hamming window. In Tables \[tab:sim-n0b0-flat\]–\[tab:sim-n5b0-flat\] we show the results when we instead use a window with the center $\sim$50% of the area flat, and a taper only in the pixels outside this area. This is similar to the window used by @waldmann07untersuchung. See Fig. \[fig:windows\] for the two types of window functions. ![Window functions, 24$\times$24 pixels. **Left:** Hamming window. **Right:** Flat-top window.[]{data-label="fig:windows"}](15331fg4a "fig:"){width="0.4\linewidth"} ![Window functions, 24$\times$24 pixels. **Left:** Hamming window. **Right:** Flat-top window.[]{data-label="fig:windows"}](15331fg4b "fig:"){width="0.4\linewidth"} The results are mixed. The errors are larger but there are fewer outliers. The results are similar with and without noise. The systematic underestimation is reduced, resulting in slopes closer to unity. Based on these data we cannot say which window is better, they would have to be evaluated specifically for any new situation where one wants to use CFF. The important result for our purposes is that the comparison between SDF and ADF$^2$ vs CFF does not depend on the window function used. Image shift as a measure of wavefront tilt {#sec:image-shift-versus} ========================================== For wavefront sensing, the quantities that really need to be measured are the average wavefront gradients at the positions corresponding to the subapertures. One assumes that a shift in image position corresponds perfectly to the average gradient of the wavefront across the subaperture. 
However, in addition to local gradients, continuous wavefront aberrations across the telescope aperture also result in local wavefront curvature. Is the assumption valid anyway and how good is it for different seeing conditions, as quantified with Fried’s parameter $r_0$? Using Kolmogorov statistics for different $r_0$ without any assumption of partial correction by an AO system makes the statistics from this experiment relevant to open-loop WFS. I.e., systems for measuring seeing statistics [e.g., @scharmer10s-dimm+] and the capture phase for AO systems, but not necessarily AO systems in closed loop. Artificial data {#sec:seeing-data-recipe} --------------- The setup corresponds to a filled 98-cm pupil (like the SST) with 85 subapertures, each 9.55 cm edge-to-edge. In Fig. \[fig:hexwavefront\] we show one sample Kolmogorov wavefront phase. The superimposed pattern shows the geometry of 85 hexagonal microlenses circumscribed by a telescope pupil. Figure \[fig:hexplanes\] demonstrates the local plane approximation implicit in SH wavefront sensing, while Fig. \[fig:hextilts\] shows just the tilts. The exact geometry does not matter for the results reported here, since our tests do not involve the step where wavefronts are reconstructed from the shift measurements. However, the geometry discussed is along the lines planned for the next generation SST AO system, and in that respect motivates the particular subaperture size and shape investigated. We generated 100 wavefront phases, $\phi_i$, following Kolmogorov statistics. For making each simulated phase screen, the following procedure was used. 1003 random numbers were drawn from a standard normal distribution, scaled with the square root of the atmospheric variances and used as coefficients for atmospheric Karhunen–Lòeve (KL) functions 2–1004. We used KL functions based directly on the theory of @fried78probability, as implemented by @dai95modal. These modes are numbered in order of decreasing atmospheric variance, and the exact range of indices is motivated by KL$_1$ being piston and KL$_{1005}$ a circular mode, starting a higher radial order. Figure \[fig:hexwavefront\] shows a sample wavefront masked with the pattern of the 85 hexagonal microlens geometry. For all simulations, we used a wavelength of 500 nm and a telescope aperture diameter of $D_\text{tel}=98$ cm. In order to cover a range of different seeing conditions, we scaled these wavefront phases to different values of Fried’s parameter, $r_0\in\{5,7,10,15,20\}$ cm, by multiplying with $(D_\text{tel}/r_0)^{5/6}$. Using the same wavefront shapes scaled differently like this, the performance for different values of $r_0$ should be directly comparable. For each random wavefront, separately scaled to each value of $r_0$, and for each subpupil defined by a microlens, we generated an image by convolving the GI in Fig. \[fig:GI\] with a PSF based on the subpupil and the local wavefront phase. We want to examine the effect of using different subfield sizes. Increased subfield size can be used in different ways: Either one can change the image scale, so the same amount of granulation fits in the FOV but in better pixel resolution. Or one can keep the original image scale so more granulation fits in the FOV. (Or something in between.) Therefore we make images at three different image scales by box-car compressing them by three different integer factors, 7, 10, and 13. The resulting image scales are 029/pixel, 041/pixel, and 053/pixel. 
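A minimal sketch of the $r_0$ scaling step described above, assuming the base screens are synthesized for $D_\text{tel}/r_0=1$ (one way to read that step); the constants and names are illustrative only:

```python
import numpy as np

D_TEL = 0.98  # telescope aperture diameter [m]

def scale_to_r0(phase, r0, d_tel=D_TEL):
    """Scale a Kolmogorov phase screen to Fried parameter r0 [m].

    The phase standard deviation scales as (D_tel/r0)^(5/6), so the same screen
    scaled to different r0 values keeps its shape and the results for different
    seeing conditions are directly comparable.
    """
    return phase * (d_tel / r0) ** (5.0 / 6.0)

# Stand-in for a KL-synthesized screen, scaled to the five seeing cases used here.
phase0 = np.random.standard_normal((128, 128))
screens = {r0: scale_to_r0(phase0, r0) for r0 in (0.05, 0.07, 0.10, 0.15, 0.20)}
```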
To summarize: the images we have generated were downgraded to the resolution of the subpupil, shifted by the local wavefront tilt, and also somewhat blurred by the local wavefront curvature, the latter in particular for data with small $r_0$. A bias was then added to make the RMS contrast of the granulation pattern approximately 3% of the mean intensity.

Processing {#sec:seeing-processing}
----------

For each subfield size, noise, and $r_0$, relative shifts and tilts were calculated for 490,000 randomly selected pairs of subpupil images. We operate on image pairs corresponding to subpupils from different random wavefronts, so results are not influenced systematically by spatial correlations. For the shift measurements, we use all the CAs from Sect. \[sec:algorithms\] except ADF, applied to the two images in a pair. We use only the 2QI method for subpixel interpolation. Shift measurements were calculated twice, with and without 1% noise added to the images. We increased the noise level from the 0.5% used in Sect. \[sec:noisy-data\] to make the effect of noise clearer and thus allow better comparison of different methods. We do not investigate bias mismatch in this experiment. The conclusion from Sect. \[sec:algorithm-accuracy\] is that bias mismatch should be compensated for before applying the shift measurement methods. For comparison with the shift measurements, $\delta x_\text{shift}$, we fitted Zernike tip and tilt to each wavefront, within each hexagonal subpupil. The relative wavefront tilt for an image pair is the difference between the Zernike tilts for the two subpupils in the pair. These relative tip/tilt coefficients in radians, $\alpha_x$, are converted to image shift, $$\label{eq:5} \delta x_\text{tilt} = \frac{2\lambda}{\pi r D_\text{tel}} \alpha_x$$ where $r$ is the image scale in rad/pixel. We calculate the robust statistics of the resulting shifts, remove $4\sigma$ outliers, and fit the data to the relationship $$\label{eq:linesine2} \delta x_\text{shift} = p_0 + p_1\delta x_\text{tilt} + p_2\sin(a\, \delta x_\text{tilt}),$$ where $a=2\pi/1$ pixel. Compare Eq. (\[eq:linesineone\]).

Results {#sec:seeing-results}
-------

Table \[tab:seeing-simulation\] shows the results from images with image scale 0\farcs41/pixel and different subfield sizes, $N\times N$ pixels with $N=16$, 24, and 36. In seconds of arc, this corresponds to 6\farcs56, 9\farcs84, and 14\farcs76 squared. The results using the additional image scales 0\farcs29/pixel and 0\farcs53/pixel and a single array size, $N=24$, are in Table \[tab:seeing-simulation-comp\]. In addition to the tabulated results, we calculated the following: The standard deviation of $\delta x_\text{tilt}$ varies linearly with $r_0^{-5/6}$ as expected from Kolmogorov statistics: $\sigma_\text{tilt}=2.68$, 2.02, 1.50, 1.07, 0.84 pixels $=1\farcs10$, 0\farcs83, 0\farcs62, 0\farcs44, 0\farcs34 for $r_0=5$, 7, 10, 15, 20 cm, resp. Correlation coefficients calculated after removal of outliers are very high: they round to 1.00 in all cases, except CFF and CFI at $N=16$, for which they are 0.98–0.99.

### Failures {#sec:failures}

If the errors were normally distributed, using a $4\sigma$ limit for defining outliers should give a failure rate of 0.0063%. With almost $5\cdot10^5$ samples we expect the normal distribution to be well realized, but the actual failure rates are larger. All CAs show an excess, but by far it is the CFF results that suffer from the highest number of outliers, particularly for small subfields and small $r_0$. CFI is also slightly worse than SDF and ADF$^2$.
All failure rates are $\la10$%. They are $>2$% for $r_0=5$ cm with all methods when using the smallest image scale, 0\farcs29/pixel, and for small FOVs and $r_0=5$, 7 cm (and CFI $r_0=5$ cm, noise, $N=16$). Figure \[fig:fail\] illustrates how failure rates sometimes increase with noise, sometimes decrease. With noise, the failure rate decreases with large FOV in arcsec (exception: $N=36$ and $r_0=7$ cm). The variation is less systematic without noise.

![Failure rates in percent for different FOV sizes and CAs. Red: $r_0=20$ cm; Green: $r_0=10$ cm; Blue: $r_0=7$ cm. $N\times N$-pixel subfields. Diamonds ($\Diamond$): image scale 0\farcs41/pixel, varying $N=\{16, 24, 36\}$ as labeled in the plots; Down triangles ($\triangledown$): 0\farcs29/pixel, $N=24$; Up triangles ($\vartriangle$): 0\farcs53/pixel, $N=24$. All the above without noise. Crosses ($\times$): corresponding FOVs, with noise. Note different vertical scales.[]{data-label="fig:fail"}](15331fg6a "fig:"){width="\tilewidth"} ![image](15331fg6b){width="\tilewidth"} ![image](15331fg6c){width="\tilewidth"}

### Shift errors {#sec:shift-errors}

We now plot some of the results in pixels rather than seconds of arc so we can compare with the results in Sect. \[sec:algorithm-accuracy\]. Figure \[fig:scatter-seeing-36x36-r0-20cm\] shows the easiest case: the largest subfields, 36$\times$36 pixels, and the best seeing, $r_0=20$ cm, resulting in the smallest shifts. The ADF$^2$ results are so similar to the SDF results that we only show the latter of those two methods. The errors in Fig. \[fig:scatter-seeing-36x36-r0-20cm\] are consistent with the ones in Fig. \[fig:perfect-scatter-four-methods\]. We recognize the undulations but we also see a linear trend, not only for CFF. For SDF and CFF, $\sigma_\text{err}\approx0.025$ pixels, which matches the undulations plus the error bars (for CFF because at $r_0=20$ cm the measurements are dominated by small shifts). For CFI, $\sigma_\text{err}\approx0.045$ pixels, which is actually slightly better than in Fig. \[fig:perfect-scatter-four-methods\]. These results (as opposed to the ones in Fig. \[fig:scatter-seeing-16x16-r0-5cm\]) are comparable because 20 cm is much larger than the subpupil size, so the images are not blurred by local phase curvature. We cannot directly compare the noisy data, because here we have 1% noise, while in the earlier experiment we used 0.5%. Linear fit offsets are small, $\left|p_0\right|\lesssim 0.002$ pixels for all CAs, except for CFI, which has $\left|p_0\right|\lesssim0.016$ pixels. The undulations fit sines with an amplitude of $p_2 \approx 0.01$ pixels in almost all cases, consistent with Figs.
\[fig:perfect-scatter-four-methods\] and \[fig:perfect-scatter-SDF-different-versions\], where we used identical images with known shifts. The exceptions are cases when both $N$ and $r_0$ are small. In the latter cases the fits to the sine function sometimes give as small amplitudes as $p_2 \approx 0.002$ pixels or less. For shifts smaller than 0.2 pixels, the overall overestimation turns into underestimation. For CFF, the slope and the undulations work in the same direction, making the underestimation larger for the smaller shifts. Figure \[fig:scatter-seeing-16x16-r0-5cm\] shows the most demanding test: the smallest subfields, 16$\times$16 pixels, and the worst seeing, $r_0=5$ cm, resulting in the largest shifts and the images most blurred by local phase curvature. Note much larger dispersion and error in $p_1$ for CFF. ### Z-tilts and G-tilts {#sec:slopes} The wavefront tilt measured over a subpupil is by necessity an approximation because in reality the tilts vary over the subpupil. In the night-time literature, two kinds of tilts are discussed [e.g., @tokovinin02differential]. G-tilts correspond to averaging the wavefront Gradient, which is mathematically equivalent to measuring the center of Gravity of the PSF. However, because of noise and asymmetrical PSFs this can never be realized in practice. Windowing and thresholding the PSF gives measurements that are more related to Z-tilts, corresponding to Zernike tip/tilt and the location of the PSF peak. When interpreting the measurements, we need to know what kind of tilts are measured by our methods. The simulated tilts are implemented as coefficients to the Zernike tip and tilt polynomials. So if the shifts measure Z-tilts, the expected $p_1$ is by definition $$\label{eq:8} E[p_1\mid\text{Z-tilt}]\equiv1 .$$ In order to derive the expected $p_1$ for G-tilts, we use formulas given by @tokovinin02differential. The variance of the differential image motion can be written as $$\label{eq:4} \sigma_\text{d}^2 = K \lambda^2 r_0^{-5/3} D^{-1/3}$$ where $D$ is the subpupil diameter and $K$ is a number that depends on the kind of tilt. The expected $p_1$ should be equal to the ratio of $\sigma_\text{d}$ for G-tilts and Z-tilts, i.e., $$E[p_1\mid\text{G-tilt}] = \sqrt{K_\text{G}/K_\text{Z}} \approx 0.966, \label{eq:7}$$ where we used $K=K_\text{G}=0.340$ for G-tilts and $K=K_\text{Z}=0.364$ for Z-tilts, which is asymptotically correct for large separations between subpupils. This corresponds to a 3.4% difference in tilt measurements with Z-tilts giving the larger numbers, regardless of wavelength, seeing conditions, and aperture size. For smaller separations, $E[p_1\mid\text{G-tilt}]$ is even smaller and depends somewhat on whether the shifts are longitudinal or transversal (parallel or orthogonal to the line separating the apertures). In our results for all the CAs except CFF, $1.007\la p_1 \la 1.010$. This means they overestimate Z-tilts systematically by $\la1$% and G-tilts by $\ga4.6$%. This result appears to be robust with respect to noise, subfield size, image scale, and seeing conditions. 
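As a small numerical check, Eq. (\[eq:7\]) and the shift conversion of Eq. (\[eq:5\]) can be written as follows; this is an illustrative sketch only, with assumed function and variable names:

```python
import numpy as np

# Eq. (7): expected slope if the methods measured G-tilts rather than Z-tilts.
K_G, K_Z = 0.340, 0.364
print(np.sqrt(K_G / K_Z))   # ~0.966

# Eq. (5): convert a relative Zernike tip/tilt coefficient alpha_x (radians)
# to an image shift in pixels; image_scale in rad/pixel, wavelength and d_tel in metres.
def tilt_to_shift(alpha_x, wavelength, image_scale, d_tel):
    return 2.0 * wavelength / (np.pi * image_scale * d_tel) * alpha_x
```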
![image](15331fg7a){width="\figwidth"} ![image](15331fg7b){width="\figwidth"} ![image](15331fg7c){width="\figwidth"}\
![image](15331fg7d){width="\figwidth"} ![image](15331fg7e){width="\figwidth"} ![image](15331fg7f){width="\figwidth"}

![image](15331fg8a){width="\figwidth"} ![image](15331fg8b){width="\figwidth"} ![image](15331fg8c){width="\figwidth"}\
![image](15331fg8d){width="\figwidth"} ![image](15331fg8e){width="\figwidth"} ![image](15331fg8f){width="\figwidth"}

The CFF $p_1$ values confirm the underestimation of the image shift found in Sect. \[sec:perfect-processing\]. They depend mostly on the size of the subfield but also to some extent on $r_0$ and noise. CFF $p_1$ improves with larger FOV in arcsec, regardless of number of pixels and $r_0$. For small FOVs, CFF $p_1$ is smaller than the G-tilts slope of 0.966, and it grows with FOV size to cross the G-tilt value at $\sim$14\arcsec. For the other CAs, $p_1$ is independent of FOV size. The CFF slope thus improves with subfield size, but is it the size in pixels or the size in arcsec that is important? Figure \[fig:slopes\] makes it clear that the size in arcsec is the important parameter and that with even larger FOVs we should expect to measure something closer to Z-tilts. We conjecture that the important parameter is the number of 1\arcsec resolution elements that fit within the FOV.

### RMS errors {#sec:rms-errors}

![CFF slopes $p_1$ for different FOV sizes, no noise. Red: $r_0=20$ cm; Green: $r_0=10$ cm; Blue: $r_0=7$ cm. $N\times N$-pixel subfields. Diamonds ($\Diamond$): image scale 0\farcs41/pixel, varying $N=\{16, 24, 36\}$ as labeled in the plots; Down triangles ($\triangledown$): 0\farcs29/pixel, $N=24$; Up triangles ($\vartriangle$): 0\farcs53/pixel, $N=24$. All the above without noise. Crosses ($\times$): corresponding FOVs, with noise. The dotted lines correspond to Z-tilt ($p_1= 1.0$) and G-tilt ($p_1= 0.966$), respectively (see Eq. (\[eq:7\])). The gray band at the top represents the $1.007 \protect\la p_1 \protect\la 1.010$ range of the other CA results.[]{data-label="fig:slopes"}](15331fg9){width="\tilewidth"}

The RMS errors, $\sigma_\text{err}$, are calculated after removing the outliers and after subtracting the linear fit. In spite of having more outliers removed from the calculations, the errors are worse for CFF than for the other methods. Compared to the noise-free data, adding 1% image noise significantly increases $\sigma_\text{err}$. In many cases it also decreases the number of fails (all cases with $N=24,36$, most cases with $N=16$). This probably means the noise makes the error distribution more Gaussian. $\sigma_\text{err}$ decreases with increasing $r_0$, indicating that the algorithms perform better in good seeing, as expected. But are the results worse in bad seeing because of local phase curvature (i.e., smearing) or just because the shifts are larger? The relative measure, $\sigma_\text{err}/\sigma_\text{tilt}$, does not decrease as much with $r_0$ for zero-noise data and actually tends to *increase* for noisy data. So the latter should not be the major effect.

![Standard deviations of errors, $\sigma_\text{err}$, for different FOV sizes and CAs. Red: $r_0=20$ cm; Green: $r_0=10$ cm; Blue: $r_0=7$ cm. $N\times N$-pixel subfields. Diamonds ($\Diamond$): image scale 0\farcs41/pixel, varying $N=\{16, 24, 36\}$ as labeled in the plots; Down triangles ($\triangledown$): 0\farcs29/pixel, $N=24$; Up triangles ($\vartriangle$): 0\farcs53/pixel, $N=24$. Symbols below the dotted line correspond to data without noise, above the line with 1% noise. Note different vertical scales.[]{data-label="fig:sdevs"}](15331fg10a "fig:"){width="\tilewidth"} ![image](15331fg10b){width="\tilewidth"} ![image](15331fg10c){width="\tilewidth"}

How do the errors depend on the FOV size in pixels and in seconds of arc? Figure \[fig:sdevs\] visualizes the following results from the tables. Without noise, the CFI and CFF results improve with $N$ as well as with the FOV size in arcsec at constant $N$ (particularly from 6\farcs9 to 9\farcs8). SDF (and ADF$^2$, the results are so similar that we show only SDF) errors are more or less independent of FOV size, whether in pixels or in arcsec. With 1% noise, however, for all methods, the errors improve only slightly or *grow* when $N$ is constant and we increase the FOV size in arcsec (particularly from 7\arcsec to 13\arcsec). When $N$ is increased from 16 to 24 with an almost constant 7\arcsec FOV, there is a significant improvement. There is also a significant improvement when $N$ is increased from 24 to 36 (although the FOV in arcsec is increased too). So for the RMS error, the FOV in pixels is more important than the FOV in arc seconds with noisy data.

Discussion and conclusions {#sec:conclusions}
==========================

We have evaluated five different correlation algorithms and four different interpolation algorithms in two experiments with artificial data. Among these are the algorithms in use for the AO systems installed at the major high-resolution solar telescopes today. The experiment in Sect. \[sec:algorithm-accuracy\] examined the inherent performance of the methods with identical images and known shifts, and also introduced the effects of noise and a mismatch in intensity level (bias) between images. We used image contrast and noise levels that resemble the setup at the SST. In Sect. \[sec:image-shift-versus\], we also introduced local wavefront curvature and different seeing conditions. Based on the results of both experiments, we recommend SDF and ADF$^2$ (in that order) for calculating the correlation functions. They give significantly smaller random errors and more predictable systematic errors than the competing methods. The SDF results are marginally better, so if computational speed is not an issue, use SDF. But ADF$^2$ may, by virtue of speed, be preferable. For subpixel interpolation, 2QI and 2LS perform better than the one-dimensional interpolation algorithms. It is clear from Sect. \[sec:algorithm-accuracy\] that bias mismatch has a strong and negative effect on the performance of the difference based methods.
We have not established how much bias mismatch can be tolerated. Our conclusion is that it should be compensated for in pre-processing, before the shift measurement methods are applied. The simulated bias mismatch comes from multiplication with 1.01 but is compensated for by subtraction for CFI and CFF. Based on the fact that the latter methods give identical results with and without the bias mismatch (to the number of digits shown in the table), we conclude that it does not matter whether the mismatch is removed by multiplication or by subtraction.

@waldmann07untersuchung considers only a single correlation algorithm (CFF) but tries a few different interpolation algorithms: 1LS, 2LS, and a Gaussian fit. He finds that a Gaussian fit gives the better results, but the almost as good 1LS is less complicated, so he uses that. On the other hand, @2008SPIE.7015E.154W try CFF, CFI (almost: correlation instead of covariance), and ADF, but use only 1LS for interpolation. They find CFF best and ADF worst. Trying all combinations of those methods, @johansson10cross-correlation confirmed that Gaussian and 1LS perform similarly together with CFF. She also found a similar performance for SDF and ADF$^2$ with 1LS and Gaussian, but significantly better performance when interpolated with 2QI and 2LS. Based on this, we did not try a Gaussian fit here because of its greater computational cost.

We have demonstrated that with the recommended methods, we can measure shifts in identical, noise-free 24$\times$24-pixel images with an RMS error of $0.012$ pixels, corresponding to 0\farcs008 in our setup. With a realistic image noise RMS of 0.5%, the errors increase by 40%, to 0.017 pixels. Increasing the noise RMS to 1% gives unacceptably high measurement errors in bad seeing, particularly with small subimages. Increasing the subfield size from 16 to 24 pixels squared reduced the error by approximately 30%.

One method is not necessarily the best choice both for open loop and for closed loop. In open loop, performance for large shifts and predictable systematic errors are important. However, CFF can compete with SDF and ADF$^2$ in closed loop, where wavefronts are already compensated for and the CFF random errors are small. The underestimation of the shifts can be reduced by using a larger FOV and can also be compensated for with a different servo gain. Based on our results, it is difficult to find a reason to ever use the CFI method. We have found that SDF, ADF$^2$, and CFI measure Z-tilts with a 1% systematic overestimation, rather than G-tilts. For the FOV sizes relevant to solar SH WFS, the CFF method severely underestimates the tilts, but it is likely that for larger FOVs, CFF also measures Z-tilts.

@johansson10cross-correlation also processed images together with a reference image that was slightly distorted geometrically (compressed image scale in one direction and expanded in the other) and found that it could significantly affect the results. We have not examined this effect here. A relevant further test involving anisoplanatic effects would require geometrical distortions and differential blurring whose shape and magnitude can be calculated from realistic atmospheric turbulence profiles.

We end by emphasizing that small details in processing may have large effects on the results. Examples from our evaluation include ADF versus ADF$^2$ and the choice of apodization for CFF.
As in so many other situations, it is important to keep track of implementation details and other tricks, so they are not lost when upgrading SH software. I acknowledge many discussions with G[ö]{}ran Scharmer, Pit Sütterlin and Tim van Werkhoven. Thomas Berkefeld and Torsten Waldmann helped me understand some details of their work with the VTT adaptive optics system and simulations of wide-field wavefront sensors. I am grateful to George (formerly Guang-ming) Dai for calculating and sharing the radial Karhunen–Lòeve functions used for the atmospheric turbulence simulations. The Swedish 1-m Solar Telescope is operated on the island of La Palma by the Institute for Solar Physics of the Royal Swedish Academy of Sciences in the Spanish Observatorio del Roque de los Muchachos of the Instituto de Astrofísica de Canarias. [28]{} natexlab\#1[\#1]{} Ballesteros, E., Collados, M., Bonet, J. A., [et al.]{} 1996, , 115, 353 Dai, G.-m. 1995, J. Opt. Soc. Am. A, 12, 2182 Enochson, L. D. & Otnes, R. K. 1968, Programming and Analysis for Digital Time Series Data (U.S. Dept. of Defense, Shock and Vibration Info. Center) Fried, D. L. 1978, J. Opt. Soc. Am., 68, 1651 Johansson, U. 2010, Masters thesis, Stockholm University , C. U., [Plymate]{}, C., & [Ammons]{}, S. M. 2003, in Proc. SPIE, Vol. 4853, Innovative Telescopes and Instrumentation for Solar Astrophysics, ed. [S. L. Keil and S. V. Avakyan]{}, 351–359 , P. A., [Owner-Petersen]{}, M., & Dainty, C. 2005, Optics Express, 13, 9527 , W. B. 1993, in Astronomical Society of the Pacific Conf. Ser., Vol. 52, Astronomical Data Analysis Software and Systems II, ed. [R. J. Hanisch, R. J. V. Brissenden, & J. Barnes]{}, 246, see also <http://idlastro.gsfc.nasa.gov> L[ö]{}fdahl, M. G. 2002, in Proc. SPIE, Vol. 4792, Image Reconstruction from Incomplete Data II, ed. P. J. Bones, M. A. Fiddy, & R. P. Millane, 146–155 , C. B. 2009, in Astronomical Society of the Pacific Conf. Ser., Vol. 411, Astronomical Data Analysis Software and Systems XVIII, ed. [D. A. Bohlender, D. Durand, & P. Dowler]{}, 251–254, see also <http://purl.com/net/mpfit> , N., [Noto]{}, Y., [Kato]{}, S., [et al.]{} 2009, Optical Review, 16, 558 Mor[é]{}, J. 1978, in Lecture Notes in Mathematics, Vol. 630, Numerical Analysis, ed. G. A. Watson (Springer-Verlag), 105 , L. J. 1986, , 25, 392 , L. J. & [Simon]{}, G. W. 1988, , 333, 427 , T. R. & [Radick]{}, R. R. 1998, in Proc. SPIE, Vol. 3353, Adaptive Optical System Technologies, ed. D. [Bonaccini]{} & R. K. [Tyson]{}, 72–81 , T. R., [Richards]{}, K., [Hegwer]{}, S., [et al.]{} 2004, in Proc. SPIE, Vol. 5171, Telescopes and Instrumentation for Solar Astrophysics, ed. S. [Fineschi]{} & M. A. [Gummin]{}, 179–186 Scharmer, G. B., Dettori, P., L[ö]{}fdahl, M. G., & Shand, M. 2003, in Proc. SPIE, Vol. 4853, Innovative Telescopes and Instrumentation for Solar Astrophysics, ed. S. Keil & S. Avakyan, 370–380 Scharmer, G. B., Shand, M., L[ö]{}fdahl, M. G., Dettori, P. M., & Wei, W. 2000, in Proc. SPIE, Vol. 4007, Adaptive Optical Systems Technologies, ed. P. L. Wizinowich, 239–250 , G. B. & [van Werkhoven]{}, T. I. M. 2010, , 513, A25 Shack, R. & Platt, B. C. 1971, J. Opt. Soc. Am., 61, 656 Shand, M., Wei, W., & Scharmer, G. 1995, in Proc. SPIE, Vol. 2607, Field Programmable Gate Arrays ([FPGA]{}s) for Fast Board Development and Reconfigurable Computing, ed. J. Schewel, 145–154 , R. C. & [Tarbell]{}, T. D. 1977, [Correlation tracking study for meter-class solar telescope on space shuttle]{}, Tech. rep., Lockheed Missiles and Space Co. Tokovinin, A. 
2002, , 114, 1156 , O. 1983, , 119, 85 , O., [Soltau]{}, D., [Berkefeld]{}, T., & [Schelenz]{}, T. 2003, in Proc. SPIE, Vol. 4853, Innovative Telescopes and Instrumentation for Solar Astrophysics, ed. S. L. [Keil]{} & S. V. [Avakyan]{}, 187–193 Waldmann, T. A. 2007, Diploma thesis, Kiepenheuer Institut f[ü]{}r Sonnenphysik , T. A., [Berkefeld]{}, T., & [von der L[ü]{}he]{}, O. 2008, in Proc. SPIE, Vol. 7015, Adaptive Optics Systems, ed. N. Hubin, C. E. Max, & P. L. Wizinowich, 70155O Yi, Z. & Molowny Horas, R. L. 1992, in LEST Technical Report, Vol. 56, Proc. [LEST]{} Mini-Workshop: Software for Solar Image Processing, ed. Z. Yi, T. Darvann, & R. Molowny Horas [llcD[.]{}[.]{}[2.1]{}@rcD[.]{}[.]{}[2.1]{}@rcD[.]{}[.]{}[2.1]{}@rcD[.]{}[.]{}[2.1]{}@rcD[.]{}[.]{}[2.1]{}l]{} & & && && && && &\ && $\sigma$ (pix) & && $\sigma$ (pix) & && $\sigma$ (pix) & && $\sigma$ (pix) & && $\sigma$ (pix) & &\ SDF & 1LS & 0.037 & 1.0 && 0.042 & 1.5 && 0.041 & 1.4 && 0.028 & 0.3 && 0.027 & 0.3 &\ SDF & 1QI & 0.033 & 1.8 && 0.039 & 2.5 && 0.038 & 2.4 && 0.023 & 0.6 && 0.022 & 0.6 &\ SDF & 2LS & 0.020 & 0.3 && 0.021 & 0.3 && 0.020 & 0.3 && 0.020 & 0.1 && 0.020 & 0.3 &\ SDF & 2QI & 0.017 & 0.3 && 0.017 & 0.2 && 0.017 & 0.3 && 0.016 & 0.2 && 0.016 & 0.3 &\ CFI & 1LS & 0.114 & 0.8 && 0.119 & 0.5 && 0.118 & 0.7 && 0.109 & 0.9 && 0.106 & 0.9 &\ CFI & 1QI & 0.108 & 0.7 && 0.112 & 0.5 && 0.112 & 0.6 && 0.103 & 0.9 && 0.100 & 0.8 &\ CFI & 2LS & 0.109 & 0.7 && 0.109 & 0.4 && 0.110 & 0.6 && 0.109 & 1.0 && 0.105 & 0.9 &\ CFI & 2QI & 0.101 & 0.6 && 0.102 & 0.4 && 0.102 & 0.5 && 0.101 & 0.9 && 0.099 & 0.9 &\ CFF & 1LS & 0.112 & 5.0 && 0.060 & && 0.078 & && 0.187 & 3.0 && 0.295 & 30.1 & 0.86\ CFF & 1QI & 0.111 & 5.0 && 0.063 & && 0.079 & && 0.180 & 3.0 && 0.280 & 30.1 & 0.86\ CFF & 2LS & 0.091 & 5.1 && 0.032 & && 0.056 & && 0.167 & 3.2 && 0.269 & 30.8 & 0.86\ CFF & 2QI & 0.085 & 5.1 && 0.031 & && 0.054 & && 0.156 & 3.2 && 0.251 & 30.9 & 0.86\ ADF & 1LS & 0.045 & 9.9 && 0.049 & 14.3 && 0.049 & 13.7 && 0.039 & 2.9 && 0.037 & 2.6 &\ ADF & 1QI & 0.042 & 33.0 && 0.046 & 39.3 && 0.047 & 39.1 && 0.031 & 22.4 && 0.027 & 20.6 &\ ADF & 2LS & 0.030 & 23.1 && 0.033 & 25.1 && 0.033 & 25.1 && 0.022 & 19.2 && 0.022 & 17.5 &\ ADF & 2QI & 0.028 & 42.8 && 0.032 & 49.4 && 0.032 & 49.7 && 0.017 & 30.1 && 0.016 & 27.6 &\ ADF$^2$ & 1LS & 0.038 & 0.9 && 0.044 & 1.3 && 0.044 & 1.3 && 0.029 & 0.3 && 0.028 & 0.3 &\ ADF$^2$ & 1QI & 0.035 & 1.5 && 0.041 & 2.2 && 0.041 & 2.1 && 0.025 & 0.5 && 0.023 & 0.5 &\ ADF$^2$ & 2LS & 0.022 & 0.3 && 0.023 & 0.4 && 0.023 & 0.4 && 0.021 & 0.1 && 0.021 & 0.1 &\ ADF$^2$ & 2QI & 0.019 & 0.1 && 0.019 & 0.1 && 0.019 & 0.1 && 0.018 & && 0.018 & &\ SDF & 1LS & 0.025 & 1.2 && 0.029 & 1.7 && 0.029 & 1.6 && 0.019 & 0.4 && 0.019 & 0.5 &\ SDF & 1QI & 0.024 & 1.8 && 0.028 & 2.6 && 0.028 & 2.5 && 0.018 & 0.6 && 0.017 & 0.7 &\ SDF & 2LS & 0.014 & 0.7 && 0.014 & 0.9 && 0.014 & 0.8 && 0.013 & 0.3 && 0.013 & 0.2 &\ SDF & 2QI & 0.012 & 0.8 && 0.012 & 0.9 && 0.012 & 0.9 && 0.012 & 0.5 && 0.011 & 0.4 &\ CFI & 1LS & 0.070 & 0.3 && 0.073 & 0.2 && 0.072 & 0.2 && 0.067 & 0.3 && 0.064 & 0.4 &\ CFI & 1QI & 0.066 & 0.2 && 0.070 & 0.2 && 0.069 & 0.2 && 0.062 & 0.3 && 0.060 & 0.4 &\ CFI & 2LS & 0.065 & 0.3 && 0.067 & 0.1 && 0.066 & 0.2 && 0.065 & 0.5 && 0.062 & 0.6 &\ CFI & 2QI & 0.061 & 0.2 && 0.062 & && 0.062 & 0.1 && 0.061 & 0.5 && 0.058 & 0.5 &\ CFF & 1LS & 0.053 & 0.8 && 0.042 & && 0.047 & && 0.055 & 0.5 && 0.083 & 4.5 & 0.94\ CFF & 1QI & 0.053 & 0.8 && 0.043 & && 0.048 & && 0.052 & 0.5 && 0.079 & 4.4 & 0.94\ CFF & 2LS & 0.040 & 0.6 && 0.017 & 
&& 0.025 & && 0.062 & 0.4 && 0.092 & 3.6 & 0.94\ CFF & 2QI & 0.036 & 0.7 && 0.017 & && 0.024 & && 0.055 & 0.5 && 0.082 & 3.8 & 0.94\ ADF & 1LS & 0.029 & 34.9 && 0.032 & 41.3 && 0.033 & 41.7 && 0.021 & 22.7 && 0.019 & 20.8 &\ ADF & 1QI & 0.027 & 49.6 && 0.030 & 58.7 && 0.031 & 59.4 && 0.015 & 31.6 && 0.012 & 29.0 &\ ADF & 2LS & 0.016 & 47.9 && 0.019 & 54.3 && 0.019 & 55.0 && 0.009 & 33.8 && 0.010 & 31.1 &\ ADF & 2QI & 0.014 & 57.3 && 0.018 & 66.9 && 0.019 & 67.6 && 0.009 & 37.0 && 0.008 & 34.0 &\ ADF$^2$ & 1LS & 0.026 & 1.2 && 0.029 & 1.8 && 0.029 & 1.6 && 0.020 & 0.4 && 0.019 & 0.5 &\ ADF$^2$ & 1QI & 0.025 & 1.7 && 0.029 & 2.5 && 0.029 & 2.3 && 0.018 & 0.6 && 0.017 & 0.7 &\ ADF$^2$ & 2LS & 0.015 & 0.6 && 0.015 & 0.8 && 0.015 & 0.7 && 0.014 & 0.1 && 0.014 & 0.3 &\ ADF$^2$ & 2QI & 0.013 & 0.2 && 0.014 & 0.4 && 0.014 & 0.3 && 0.013 & 0.1 && 0.012 & 0.1 &\ [llcD[.]{}[.]{}[2.1]{}@rcD[.]{}[.]{}[2.1]{}@rcD[.]{}[.]{}[2.1]{}@rcD[.]{}[.]{}[2.1]{}@rcD[.]{}[.]{}[2.1]{}ll]{} & & && && && && &\ && $\sigma$ (pix) & && $\sigma$ (pix) & && $\sigma$ (pix) & && $\sigma$ (pix) & && $\sigma$ (pix) & &\ SDF & 1LS & 0.041 & 0.6 && 0.045 & 1.0 && 0.045 & 0.9 && 0.033 & 0.2 && 0.033 & 0.2 &\ SDF & 1QI & 0.038 & 1.0 && 0.043 & 1.4 && 0.043 & 1.3 && 0.030 & 0.3 && 0.029 & 0.2 &\ SDF & 2LS & 0.026 & 0.1 && 0.026 & 0.1 && 0.026 & 0.1 && 0.026 & 0.1 && 0.026 & 0.1 &\ SDF & 2QI & 0.024 & 0.1 && 0.024 & 0.1 && 0.024 & 0.1 && 0.023 & 0.1 && 0.023 & 0.1 &\ CFI & 1LS & 0.117 & 0.7 && 0.122 & 0.5 && 0.121 & 0.7 && 0.111 & 0.9 && 0.107 & 0.8 &\ CFI & 1QI & 0.110 & 0.7 && 0.115 & 0.4 && 0.114 & 0.6 && 0.105 & 0.8 && 0.101 & 0.8 &\ CFI & 2LS & 0.111 & 0.7 && 0.113 & 0.4 && 0.113 & 0.6 && 0.111 & 0.9 && 0.107 & 0.9 &\ CFI & 2QI & 0.104 & 0.6 && 0.105 & 0.4 && 0.105 & 0.5 && 0.103 & 0.9 && 0.100 & 0.8 &\ CFF & 1LS & 0.118 & 5.0 && 0.069 & && 0.085 & && 0.189 & 3.1 && 0.297 & 30.0 & 0.86\ CFF & 1QI & 0.117 & 5.0 && 0.072 & && 0.086 & && 0.182 & 3.1 && 0.281 & 30.0 & 0.86\ CFF & 2LS & 0.097 & 5.1 && 0.043 & && 0.064 & && 0.168 & 3.3 && 0.271 & 30.7 & 0.86\ CFF & 2QI & 0.092 & 5.2 && 0.043 & && 0.062 & && 0.158 & 3.3 && 0.252 & 30.8 & 0.86\ ADF & 1LS & 0.049 & 4.0 && 0.053 & 6.2 && 0.053 & 5.8 && 0.040 & 0.7 && 0.038 & 0.7 &\ ADF & 1QI & 0.055 & 11.2 && 0.059 & 16.1 && 0.059 & 15.6 && 0.047 & 3.3 && 0.044 & 3.1 &\ ADF & 2LS & 0.039 & 5.8 && 0.041 & 6.5 && 0.041 & 6.5 && 0.033 & 4.6 && 0.032 & 4.3 &\ ADF & 2QI & 0.041 & 20.2 && 0.044 & 23.5 && 0.045 & 23.6 && 0.033 & 14.2 && 0.032 & 13.1 &\ ADF$^2$ & 1LS & 0.043 & 0.6 && 0.048 & 0.9 && 0.048 & 0.8 && 0.035 & 0.1 && 0.034 & 0.1 &\ ADF$^2$ & 1QI & 0.041 & 0.8 && 0.046 & 1.2 && 0.046 & 1.1 && 0.032 & 0.2 && 0.031 & 0.2 &\ ADF$^2$ & 2LS & 0.028 & 0.2 && 0.029 & 0.2 && 0.029 & 0.2 && 0.027 & 0.1 && 0.028 & 0.1 &\ ADF$^2$ & 2QI & 0.026 & 0.1 && 0.027 & 0.1 && 0.027 & 0.1 && 0.025 & && 0.025 & &\ SDF & 1LS & 0.028 & 0.7 && 0.032 & 1.0 && 0.032 & 0.9 && 0.022 & 0.2 && 0.022 & 0.3 &\ SDF & 1QI & 0.027 & 0.9 && 0.031 & 1.3 && 0.031 & 1.3 && 0.021 & 0.3 && 0.021 & 0.4 &\ SDF & 2LS & 0.018 & 0.1 && 0.018 & 0.2 && 0.018 & 0.2 && 0.017 & 0.1 && 0.017 & 0.1 &\ SDF & 2QI & 0.017 & 0.1 && 0.017 & 0.1 && 0.017 & 0.1 && 0.016 & && 0.016 & &\ CFI & 1LS & 0.071 & 0.3 && 0.074 & 0.2 && 0.073 & 0.2 && 0.067 & 0.4 && 0.065 & 0.4 &\ CFI & 1QI & 0.067 & 0.2 && 0.071 & 0.2 && 0.070 & 0.2 && 0.063 & 0.3 && 0.061 & 0.3 &\ CFI & 2LS & 0.066 & 0.3 && 0.068 & 0.1 && 0.067 & 0.2 && 0.066 & 0.5 && 0.064 & 0.5 &\ CFI & 2QI & 0.061 & 0.2 && 0.063 & && 0.063 & 0.1 && 0.061 & 0.5 && 0.059 & 0.5 &\ CFF & 1LS & 0.057 & 0.8 
&& 0.047 & && 0.051 & && 0.059 & 0.5 && 0.086 & 4.3 & 0.94\ CFF & 1QI & 0.057 & 0.7 && 0.049 & && 0.053 & && 0.057 & 0.5 && 0.083 & 4.1 & 0.94\ CFF & 2LS & 0.045 & 0.6 && 0.026 & && 0.032 & && 0.065 & 0.4 && 0.093 & 3.5 & 0.94\ CFF & 2QI & 0.042 & 0.6 && 0.026 & && 0.031 & && 0.058 & 0.4 && 0.083 & 3.6 & 0.94\ ADF & 1LS & 0.037 & 13.8 && 0.040 & 18.0 && 0.041 & 18.0 && 0.031 & 6.0 && 0.029 & 5.8 &\ ADF & 1QI & 0.038 & 29.9 && 0.042 & 35.4 && 0.042 & 36.0 && 0.029 & 19.0 && 0.027 & 17.5 &\ ADF & 2LS & 0.027 & 22.5 && 0.029 & 23.6 && 0.029 & 24.7 && 0.021 & 18.2 && 0.020 & 16.7 &\ ADF & 2QI & 0.026 & 39.2 && 0.029 & 44.1 && 0.030 & 45.4 && 0.018 & 27.9 && 0.017 & 25.5 &\ ADF$^2$ & 1LS & 0.029 & 0.7 && 0.033 & 1.0 && 0.033 & 0.9 && 0.023 & 0.2 && 0.023 & 0.3 &\ ADF$^2$ & 1QI & 0.029 & 0.8 && 0.032 & 1.2 && 0.032 & 1.1 && 0.022 & 0.2 && 0.022 & 0.3 &\ ADF$^2$ & 2LS & 0.019 & 0.1 && 0.020 & 0.2 && 0.020 & 0.2 && 0.018 & && 0.018 & 0.1 &\ ADF$^2$ & 2QI & 0.018 & && 0.018 & && 0.018 & && 0.017 & && 0.017 & &\ [lllD[.]{}[.]{}[2.1]{}@rcD[.]{}[.]{}[2.1]{}@rcD[.]{}[.]{}[2.1]{}@rcD[.]{}[.]{}[2.1]{}@rcD[.]{}[.]{}[2.1]{}ll]{} & & && && && && &\ && $\sigma$ (pix) & && $\sigma$ (pix) & && $\sigma$ (pix) & && $\sigma$ (pix) & && $\sigma$ (pix) & &\ SDF & 1LS & 0.087 & 0.1 && 0.092 & 0.1 && 0.090 & 0.1 && 0.082 & && 0.083 & &\ SDF & 1QI & 0.080 & 0.1 && 0.084 & 0.2 && 0.083 & 0.2 && 0.074 & && 0.075 & &\ SDF & 2LS & 0.079 & 0.1 && 0.081 & 0.1 && 0.079 & 0.1 && 0.079 & 0.1 && 0.081 & &\ SDF & 2QI & 0.071 & 0.1 && 0.072 & 0.1 && 0.071 & 0.2 && 0.071 & 0.1 && 0.072 & &\ ADF & 1LS & 0.104 & 0.2 && 0.112 & 0.2 && 0.109 & 0.2 && 0.095 & 0.2 && 0.097 & 0.1 &\ ADF & 1QI & 0.107 & 0.3 && 0.115 & 0.3 && 0.112 & 0.3 && 0.098 & 0.3 && 0.099 & 0.3 &\ ADF & 2LS & 0.100 & 0.2 && 0.104 & 0.2 && 0.102 & 0.2 && 0.095 & 0.2 && 0.097 & 0.1 &\ ADF & 2QI & 0.100 & 0.3 && 0.105 & 0.2 && 0.103 & 0.3 && 0.096 & 0.4 && 0.098 & 0.4 &\ ADF$^2$ & 1LS & 0.108 & 0.1 && 0.113 & 0.1 && 0.111 & 0.1 && 0.102 & && 0.104 & &\ ADF$^2$ & 1QI & 0.108 & 0.1 && 0.111 & 0.1 && 0.109 & 0.1 && 0.106 & 0.1 && 0.108 & &\ ADF$^2$ & 2LS & 0.100 & 0.1 && 0.103 & 0.1 && 0.100 & 0.2 && 0.100 & 0.1 && 0.102 & &\ ADF$^2$ & 2QI & 0.101 & 0.2 && 0.101 & 0.2 && 0.099 & 0.2 && 0.103 & 0.2 && 0.106 & 0.1 &\ SDF & 1LS & 0.055 & 0.1 && 0.058 & 0.1 && 0.058 & 0.1 && 0.050 & && 0.049 & &\ SDF & 1QI & 0.050 & 0.1 && 0.053 & 0.1 && 0.053 & 0.1 && 0.045 & && 0.044 & &\ SDF & 2LS & 0.049 & && 0.049 & && 0.050 & && 0.047 & && 0.047 & &\ SDF & 2QI & 0.043 & && 0.044 & && 0.044 & && 0.042 & && 0.041 & &\ ADF & 1LS & 0.067 & 0.3 && 0.071 & 0.3 && 0.071 & 0.4 && 0.059 & 0.1 && 0.058 & 0.1 &\ ADF & 1QI & 0.069 & 0.3 && 0.074 & 0.4 && 0.074 & 0.3 && 0.062 & 0.1 && 0.060 & 0.1 &\ ADF & 2LS & 0.063 & 0.2 && 0.065 & 0.1 && 0.065 & 0.2 && 0.059 & 0.1 && 0.058 & 0.1 &\ ADF & 2QI & 0.064 & 0.2 && 0.066 & 0.3 && 0.066 & 0.3 && 0.060 & 0.2 && 0.058 & 0.2 &\ ADF$^2$ & 1LS & 0.068 & && 0.070 & && 0.070 & && 0.063 & && 0.062 & &\ ADF$^2$ & 1QI & 0.069 & && 0.070 & && 0.070 & && 0.066 & && 0.065 & &\ ADF$^2$ & 2LS & 0.062 & && 0.063 & && 0.063 & && 0.061 & && 0.060 & &\ ADF$^2$ & 2QI & 0.064 & && 0.064 & && 0.064 & && 0.064 & && 0.063 & &\ [lllD[.]{}[.]{}[2.1]{}@rcD[.]{}[.]{}[2.1]{}@rcD[.]{}[.]{}[2.1]{}@rcD[.]{}[.]{}[2.1]{}@rcD[.]{}[.]{}[2.1]{}ll]{} & & && && && && &\ && $\sigma$ (pix) & && $\sigma$ (pix) & && $\sigma$ (pix) & && $\sigma$ (pix) & && $\sigma$ (pix) & &\ SDF & 1LS & 0.089 & 0.1 && 0.094 & 0.1 && 0.092 & 0.1 && 0.084 & && 0.086 & &\ SDF & 1QI & 0.082 & 0.1 && 0.086 & 0.2 && 
0.085 & 0.1 && 0.076 & && 0.077 & &\ SDF & 2LS & 0.081 & 0.1 && 0.083 & 0.1 && 0.081 & 0.1 && 0.081 & && 0.083 & &\ SDF & 2QI & 0.073 & 0.1 && 0.074 & 0.1 && 0.073 & 0.1 && 0.073 & && 0.074 & &\ ADF & 1LS & 0.103 & 0.2 && 0.110 & 0.2 && 0.108 & 0.2 && 0.094 & 0.1 && 0.096 & &\ ADF & 1QI & 0.103 & 0.3 && 0.111 & 0.3 && 0.109 & 0.3 && 0.093 & 0.2 && 0.094 & 0.2 &\ ADF & 2LS & 0.098 & 0.2 && 0.103 & 0.2 && 0.101 & 0.2 && 0.094 & 0.1 && 0.096 & 0.1 &\ ADF & 2QI & 0.096 & 0.3 && 0.101 & 0.2 && 0.099 & 0.2 && 0.091 & 0.3 && 0.092 & 0.3 &\ ADF$^2$ & 1LS & 0.107 & 0.1 && 0.112 & 0.1 && 0.110 & 0.1 && 0.101 & && 0.103 & &\ ADF$^2$ & 1QI & 0.103 & 0.1 && 0.107 & 0.2 && 0.105 & 0.1 && 0.100 & 0.1 && 0.102 & &\ ADF$^2$ & 2LS & 0.099 & 0.1 && 0.101 & 0.1 && 0.099 & 0.1 && 0.098 & && 0.101 & &\ ADF$^2$ & 2QI & 0.096 & 0.1 && 0.097 & 0.2 && 0.095 & 0.2 && 0.097 & 0.1 && 0.099 & 0.1 &\ SDF & 1LS & 0.057 & 0.1 && 0.060 & && 0.060 & 0.1 && 0.052 & && 0.051 & &\ SDF & 1QI & 0.052 & 0.1 && 0.055 & 0.1 && 0.055 & 0.1 && 0.047 & && 0.047 & &\ SDF & 2LS & 0.051 & && 0.052 & && 0.052 & && 0.049 & && 0.049 & &\ SDF & 2QI & 0.046 & && 0.046 & && 0.046 & && 0.044 & && 0.044 & &\ ADF & 1LS & 0.067 & 0.3 && 0.071 & 0.4 && 0.072 & 0.4 && 0.060 & && 0.059 & 0.1 &\ ADF & 1QI & 0.068 & 0.3 && 0.073 & 0.5 && 0.073 & 0.4 && 0.059 & 0.1 && 0.058 & 0.1 &\ ADF & 2LS & 0.064 & 0.2 && 0.066 & 0.2 && 0.066 & 0.2 && 0.059 & 0.1 && 0.058 & 0.1 &\ ADF & 2QI & 0.063 & 0.3 && 0.065 & 0.3 && 0.066 & 0.3 && 0.058 & 0.2 && 0.056 & 0.2 &\ ADF$^2$ & 1LS & 0.068 & && 0.071 & && 0.071 & && 0.063 & && 0.062 & &\ ADF$^2$ & 1QI & 0.066 & && 0.068 & && 0.068 & && 0.063 & && 0.061 & &\ ADF$^2$ & 2LS & 0.063 & && 0.063 & && 0.064 & && 0.061 & && 0.060 & &\ ADF$^2$ & 2QI & 0.061 & && 0.061 & && 0.062 & && 0.060 & && 0.059 & &\ [lllD[.]{}[.]{}[2.1]{}@rcD[.]{}[.]{}[2.1]{}@rcD[.]{}[.]{}[2.1]{}@rcD[.]{}[.]{}[2.1]{}@rcD[.]{}[.]{}[2.1]{}ll]{} & & && && && && &\ && $\sigma$ (pix) & && $\sigma$ (pix) & && $\sigma$ (pix) & && $\sigma$ (pix) & && $\sigma$ (pix) & &\ CFF & 1LS & 0.146 & 2.0 && 0.076 & && 0.105 & && 0.193 & 1.6 && 0.305 & 11.6 & 0.95\ CFF & 1QI & 0.141 & 1.9 && 0.076 & && 0.103 & && 0.183 & 1.5 && 0.293 & 11.6 & 0.95\ CFF & 2LS & 0.136 & 2.0 && 0.063 & && 0.095 & && 0.187 & 1.7 && 0.295 & 11.8 & 0.95\ CFF & 2QI & 0.131 & 2.0 && 0.062 & && 0.092 & && 0.176 & 1.5 && 0.282 & 11.7 & 0.95\ CFF & 1LS & 0.077 & 0.4 && 0.044 & && 0.063 & && 0.082 & 0.5 && 0.122 & 1.7 & 0.97\ CFF & 1QI & 0.074 & 0.3 && 0.044 & && 0.061 & && 0.077 & 0.5 && 0.116 & 1.6 & 0.97\ CFF & 2LS & 0.072 & 0.3 && 0.034 & && 0.054 & && 0.082 & 0.4 && 0.123 & 1.5 & 0.97\ CFF & 2QI & 0.068 & 0.3 && 0.032 & && 0.052 & && 0.076 & 0.3 && 0.115 & 1.4 & 0.97\ [lllD[.]{}[.]{}[2.1]{}@rcD[.]{}[.]{}[2.1]{}@rcD[.]{}[.]{}[2.1]{}@rcD[.]{}[.]{}[2.1]{}@rcD[.]{}[.]{}[2.1]{}ll]{} & & && && && && &\ && $\sigma$ (pix) & && $\sigma$ (pix) & && $\sigma$ (pix) & && $\sigma$ (pix) & && $\sigma$ (pix) & &\ CFF & 1LS & 0.148 & 2.0 && 0.079 & && 0.107 & && 0.194 & 1.6 && 0.307 & 11.8 & 0.95\ CFF & 1QI & 0.144 & 2.0 && 0.079 & && 0.105 & && 0.185 & 1.5 && 0.295 & 11.7 & 0.95\ CFF & 2LS & 0.139 & 2.0 && 0.066 & && 0.098 & && 0.188 & 1.6 && 0.295 & 12.0 & 0.95\ CFF & 2QI & 0.133 & 2.0 && 0.065 & && 0.095 & && 0.177 & 1.4 && 0.283 & 11.9 & 0.95\ CFF & 1LS & 0.078 & 0.3 && 0.046 & && 0.064 & && 0.083 & 0.5 && 0.123 & 1.6 & 0.97\ CFF & 1QI & 0.076 & 0.3 && 0.046 & && 0.063 & && 0.078 & 0.4 && 0.117 & 1.5 & 0.97\ CFF & 2LS & 0.073 & 0.3 && 0.036 & && 0.056 & && 0.084 & 0.3 && 0.124 & 1.3 & 0.97\ CFF & 2QI & 0.069 & 
0.3 && 0.035 & && 0.054 & && 0.078 & 0.3 && 0.116 & 1.2 & 0.97\ [rl D[.]{}[.]{}[1.2]{}D[.]{}[.]{}[1.3]{}D[.]{}[.]{}[1.3]{}D[.]{}[.]{}[1.2]{}D[.]{}[.]{}[1.3]{}c D[.]{}[.]{}[1.2]{}D[.]{}[.]{}[1.3]{}D[.]{}[.]{}[1.3]{}D[.]{}[.]{}[1.2]{}D[.]{}[.]{}[1.3]{}]{} & & & &\ & & & & & & & & & & & &\ 5 & SDF & 1.11 & 0.054 & 0.049 & 1.67 & 1.011 &&1.13 & 0.158 & 0.144 & 1.22 & 1.011\ 5 & ADF$^2$ & 1.11 & 0.055 & 0.050 & 1.65 & 1.010 &&1.13 & 0.169 & 0.154 & 1.27 & 1.010\ 5 & CFI & 1.12 & 0.090 & 0.082 & 1.64 & 1.012 &&1.16 & 0.181 & 0.165 & 3.44 & 1.012\ 5 & CFF & 0.84 & 0.111 & 0.101 & 10.43 & 0.832 &&0.81 & 0.509 & 0.464 & 2.06 & 0.505\ 7 & SDF & 0.84 & 0.030 & 0.037 & 0.55 & 1.009 &&0.85 & 0.115 & 0.138 & 0.52 & 1.009\ 7 & ADF$^2$ & 0.84 & 0.031 & 0.037 & 0.52 & 1.009 &&0.85 & 0.123 & 0.148 & 0.54 & 1.009\ 7 & CFI & 0.84 & 0.063 & 0.076 & 0.57 & 1.010 &&0.86 & 0.132 & 0.159 & 1.27 & 1.010\ 7 & CFF & 0.69 & 0.064 & 0.077 & 4.01 & 0.859 &&0.69 & 0.235 & 0.283 & 5.57 & 0.796\ 10 & SDF & 0.62 & 0.019 & 0.031 & 0.13 & 1.009 &&0.63 & 0.095 & 0.154 & 0.25 & 1.009\ 10 & ADF$^2$ & 0.62 & 0.020 & 0.032 & 0.12 & 1.009 &&0.63 & 0.102 & 0.166 & 0.28 & 1.009\ 10 & CFI & 0.62 & 0.051 & 0.083 & 0.23 & 1.009 &&0.64 & 0.110 & 0.178 & 0.54 & 1.009\ 10 & CFF & 0.53 & 0.042 & 0.069 & 1.46 & 0.871 &&0.55 & 0.179 & 0.291 & 2.51 & 0.840\ 15 & SDF & 0.44 & 0.014 & 0.031 & 0.16 & 1.008 &&0.45 & 0.085 & 0.194 & 0.31 & 1.008\ 15 & ADF$^2$ & 0.44 & 0.014 & 0.033 & 0.14 & 1.008 &&0.45 & 0.091 & 0.208 & 0.33 & 1.008\ 15 & CFI & 0.45 & 0.046 & 0.104 & 0.24 & 1.008 &&0.46 & 0.099 & 0.225 & 0.48 & 1.009\ 15 & CFF & 0.38 & 0.029 & 0.066 & 1.03 & 0.876 &&0.41 & 0.157 & 0.357 & 1.06 & 0.855\ 20 & SDF & 0.35 & 0.011 & 0.033 & 0.04 & 1.008 &&0.36 & 0.082 & 0.236 & 0.20 & 1.008\ 20 & ADF$^2$ & 0.35 & 0.012 & 0.035 & 0.03 & 1.008 &&0.36 & 0.088 & 0.254 & 0.22 & 1.008\ 20 & CFI & 0.35 & 0.043 & 0.126 & 0.09 & 1.008 &&0.36 & 0.095 & 0.274 & 0.37 & 1.009\ 20 & CFF & 0.30 & 0.023 & 0.067 & 0.82 & 0.878 &&0.34 & 0.149 & 0.431 & 0.72 & 0.859\ 5 & SDF & 1.11 & 0.052 & 0.047 & 1.66 & 1.010 &&1.12 & 0.111 & 0.102 & 0.97 & 1.011\ 5 & ADF$^2$ & 1.11 & 0.052 & 0.048 & 1.64 & 1.010 &&1.12 & 0.119 & 0.108 & 0.93 & 1.010\ 5 & CFI & 1.11 & 0.068 & 0.062 & 1.28 & 1.012 &&1.12 & 0.121 & 0.110 & 1.03 & 1.012\ 5 & CFF & 1.00 & 0.067 & 0.061 & 2.11 & 0.919 &&0.98 & 0.193 & 0.176 & 3.10 & 0.900\ 7 & SDF & 0.84 & 0.029 & 0.035 & 0.55 & 1.009 &&0.84 & 0.078 & 0.094 & 0.39 & 1.009\ 7 & ADF$^2$ & 0.84 & 0.029 & 0.035 & 0.51 & 1.009 &&0.84 & 0.083 & 0.100 & 0.38 & 1.009\ 7 & CFI & 0.84 & 0.043 & 0.052 & 0.39 & 1.010 &&0.85 & 0.085 & 0.102 & 0.39 & 1.010\ 7 & CFF & 0.77 & 0.038 & 0.046 & 0.69 & 0.930 &&0.78 & 0.137 & 0.166 & 0.87 & 0.923\ 10 & SDF & 0.62 & 0.018 & 0.029 & 0.12 & 1.008 &&0.62 & 0.063 & 0.102 & 0.11 & 1.008\ 10 & ADF$^2$ & 0.62 & 0.018 & 0.030 & 0.12 & 1.008 &&0.63 & 0.068 & 0.110 & 0.13 & 1.008\ 10 & CFI & 0.62 & 0.032 & 0.053 & 0.09 & 1.009 &&0.63 & 0.070 & 0.113 & 0.10 & 1.009\ 10 & CFF & 0.58 & 0.024 & 0.040 & 0.36 & 0.935 &&0.59 & 0.114 & 0.185 & 0.34 & 0.931\ 15 & SDF & 0.44 & 0.012 & 0.028 & 0.16 & 1.008 &&0.45 & 0.056 & 0.127 & 0.19 & 1.008\ 15 & ADF$^2$ & 0.44 & 0.012 & 0.028 & 0.14 & 1.008 &&0.45 & 0.060 & 0.137 & 0.19 & 1.008\ 15 & CFI & 0.44 & 0.028 & 0.063 & 0.18 & 1.009 &&0.45 & 0.062 & 0.142 & 0.18 & 1.009\ 15 & CFF & 0.41 & 0.017 & 0.038 & 0.39 & 0.938 &&0.42 & 0.102 & 0.232 & 0.36 & 0.935\ 20 & SDF & 0.35 & 0.010 & 0.029 & 0.02 & 1.008 &&0.35 & 0.053 & 0.155 & 0.07 & 1.008\ 20 & ADF$^2$ & 0.35 & 0.010 & 0.029 & 0.03 & 1.008 &&0.35 & 
0.057 & 0.166 & 0.09 & 1.008\ 20 & CFI & 0.35 & 0.026 & 0.075 & 0.05 & 1.009 &&0.35 & 0.060 & 0.173 & 0.06 & 1.009\ 20 & CFF & 0.32 & 0.013 & 0.039 & 0.25 & 0.939 &&0.34 & 0.098 & 0.282 & 0.27 & 0.935\ 5 & SDF & 1.11 & 0.051 & 0.047 & 1.66 & 1.010 &&1.11 & 0.083 & 0.075 & 1.08 & 1.011\ 5 & ADF$^2$ & 1.11 & 0.051 & 0.047 & 1.64 & 1.010 &&1.11 & 0.087 & 0.079 & 1.05 & 1.011\ 5 & CFI & 1.11 & 0.058 & 0.053 & 1.41 & 1.011 &&1.11 & 0.087 & 0.079 & 1.06 & 1.011\ 5 & CFF & 1.06 & 0.054 & 0.049 & 1.47 & 0.965 &&1.06 & 0.129 & 0.117 & 0.88 & 0.962\ 7 & SDF & 0.84 & 0.028 & 0.034 & 0.58 & 1.009 &&0.84 & 0.055 & 0.066 & 0.38 & 1.009\ 7 & ADF$^2$ & 0.84 & 0.028 & 0.034 & 0.52 & 1.009 &&0.84 & 0.058 & 0.070 & 0.34 & 1.009\ 7 & CFI & 0.84 & 0.035 & 0.042 & 0.46 & 1.009 &&0.84 & 0.059 & 0.071 & 0.36 & 1.009\ 7 & CFF & 0.81 & 0.030 & 0.036 & 0.51 & 0.971 &&0.81 & 0.091 & 0.110 & 0.41 & 0.970\ 10 & SDF & 0.62 & 0.017 & 0.028 & 0.11 & 1.008 &&0.62 & 0.043 & 0.070 & 0.05 & 1.008\ 10 & ADF$^2$ & 0.62 & 0.018 & 0.028 & 0.11 & 1.008 &&0.62 & 0.046 & 0.074 & 0.06 & 1.008\ 10 & CFI & 0.62 & 0.025 & 0.040 & 0.12 & 1.008 &&0.62 & 0.047 & 0.076 & 0.05 & 1.008\ 10 & CFF & 0.60 & 0.019 & 0.031 & 0.12 & 0.973 &&0.60 & 0.075 & 0.122 & 0.15 & 0.972\ 15 & SDF & 0.44 & 0.012 & 0.027 & 0.17 & 1.008 &&0.44 & 0.037 & 0.085 & 0.16 & 1.008\ 15 & ADF$^2$ & 0.44 & 0.012 & 0.027 & 0.14 & 1.008 &&0.44 & 0.040 & 0.091 & 0.13 & 1.008\ 15 & CFI & 0.44 & 0.020 & 0.045 & 0.26 & 1.007 &&0.44 & 0.041 & 0.093 & 0.15 & 1.007\ 15 & CFF & 0.43 & 0.013 & 0.029 & 0.18 & 0.974 &&0.43 & 0.067 & 0.152 & 0.23 & 0.974\ 20 & SDF & 0.35 & 0.009 & 0.027 & 0.02 & 1.008 &&0.35 & 0.035 & 0.102 & 0.03 & 1.008\ 20 & ADF$^2$ & 0.35 & 0.009 & 0.027 & 0.02 & 1.008 &&0.35 & 0.038 & 0.110 & 0.03 & 1.008\ 20 & CFI & 0.35 & 0.018 & 0.053 & 0.15 & 1.007 &&0.35 & 0.039 & 0.113 & 0.03 & 1.007\ 20 & CFF & 0.34 & 0.010 & 0.029 & 0.04 & 0.974 &&0.34 & 0.064 & 0.185 & 0.11 & 0.974\ [rl D[.]{}[.]{}[1.2]{}D[.]{}[.]{}[1.3]{}D[.]{}[.]{}[1.3]{}D[.]{}[.]{}[1.2]{}D[.]{}[.]{}[1.3]{}c D[.]{}[.]{}[1.2]{}D[.]{}[.]{}[1.3]{}D[.]{}[.]{}[1.3]{}D[.]{}[.]{}[1.2]{}D[.]{}[.]{}[1.3]{}]{} & & & &\ & & & & & & & & & & & &\ 5 & SDF & 1.09 & 0.054 & 0.049 & 4.09 & 1.011 &&1.09 & 0.114 & 0.104 & 3.50 & 1.010\ 5 & ADF$^2$ & 1.09 & 0.055 & 0.050 & 4.08 & 1.011 &&1.09 & 0.122 & 0.111 & 3.47 & 1.009\ 5 & CFI & 1.09 & 0.084 & 0.077 & 3.91 & 1.011 &&1.10 & 0.135 & 0.123 & 3.88 & 1.010\ 5 & CFF & 0.88 & 0.095 & 0.087 & 8.39 & 0.859 &&0.85 & 0.212 & 0.193 & 8.50 & 0.814\ 7 & SDF & 0.84 & 0.030 & 0.036 & 0.64 & 1.009 &&0.84 & 0.079 & 0.095 & 0.53 & 1.009\ 7 & ADF$^2$ & 0.84 & 0.030 & 0.036 & 0.64 & 1.009 &&0.84 & 0.085 & 0.103 & 0.53 & 1.009\ 7 & CFI & 0.84 & 0.057 & 0.069 & 0.61 & 1.010 &&0.84 & 0.096 & 0.116 & 0.57 & 1.010\ 7 & CFF & 0.71 & 0.055 & 0.066 & 2.69 & 0.879 &&0.71 & 0.135 & 0.163 & 3.20 & 0.864\ 10 & SDF & 0.62 & 0.018 & 0.030 & 0.17 & 1.009 &&0.63 & 0.064 & 0.104 & 0.21 & 1.009\ 10 & ADF$^2$ & 0.62 & 0.019 & 0.030 & 0.16 & 1.009 &&0.63 & 0.069 & 0.113 & 0.22 & 1.009\ 10 & CFI & 0.62 & 0.046 & 0.075 & 0.24 & 1.009 &&0.63 & 0.080 & 0.129 & 0.19 & 1.009\ 10 & CFF & 0.54 & 0.036 & 0.058 & 1.11 & 0.888 &&0.55 & 0.107 & 0.174 & 0.82 & 0.878\ 15 & SDF & 0.44 & 0.012 & 0.027 & 0.08 & 1.008 &&0.45 & 0.057 & 0.129 & 0.17 & 1.008\ 15 & ADF$^2$ & 0.44 & 0.012 & 0.028 & 0.07 & 1.008 &&0.45 & 0.062 & 0.140 & 0.18 & 1.008\ 15 & CFI & 0.45 & 0.041 & 0.093 & 0.12 & 1.008 &&0.45 & 0.071 & 0.163 & 0.12 & 1.008\ 15 & CFF & 0.39 & 0.024 & 0.054 & 0.89 & 0.893 &&0.40 & 0.094 & 0.213 & 0.37 & 0.884\ 20 & SDF & 
0.35 & 0.009 & 0.027 & 0.06 & 1.008 &&0.35 & 0.054 & 0.156 & 0.16 & 1.008\ 20 & ADF$^2$ & 0.35 & 0.010 & 0.028 & 0.05 & 1.008 &&0.35 & 0.059 & 0.169 & 0.18 & 1.008\ 20 & CFI & 0.35 & 0.039 & 0.112 & 0.07 & 1.008 &&0.36 & 0.068 & 0.197 & 0.11 & 1.008\ 20 & CFF & 0.31 & 0.018 & 0.053 & 0.88 & 0.895 &&0.32 & 0.089 & 0.256 & 0.33 & 0.886\ 5 & SDF & 1.11 & 0.051 & 0.047 & 1.50 & 1.010 &&1.12 & 0.119 & 0.109 & 0.80 & 1.010\ 5 & ADF$^2$ & 1.11 & 0.052 & 0.047 & 1.48 & 1.010 &&1.12 & 0.129 & 0.118 & 0.78 & 1.010\ 5 & CFI & 1.11 & 0.064 & 0.058 & 1.16 & 1.012 &&1.12 & 0.127 & 0.115 & 0.84 & 1.012\ 5 & CFF & 1.04 & 0.059 & 0.053 & 1.33 & 0.946 &&1.04 & 0.209 & 0.190 & 1.79 & 0.933\ 7 & SDF & 0.84 & 0.029 & 0.035 & 0.40 & 1.009 &&0.84 & 0.084 & 0.102 & 0.20 & 1.009\ 7 & ADF$^2$ & 0.84 & 0.029 & 0.035 & 0.39 & 1.009 &&0.84 & 0.092 & 0.111 & 0.23 & 1.009\ 7 & CFI & 0.84 & 0.040 & 0.048 & 0.30 & 1.010 &&0.84 & 0.090 & 0.109 & 0.21 & 1.010\ 7 & CFF & 0.79 & 0.034 & 0.041 & 0.34 & 0.954 &&0.80 & 0.153 & 0.184 & 0.56 & 0.950\ 10 & SDF & 0.62 & 0.019 & 0.031 & 0.08 & 1.008 &&0.62 & 0.069 & 0.112 & 0.07 & 1.008\ 10 & ADF$^2$ & 0.62 & 0.019 & 0.031 & 0.08 & 1.008 &&0.63 & 0.075 & 0.122 & 0.08 & 1.008\ 10 & CFI & 0.62 & 0.030 & 0.049 & 0.24 & 1.009 &&0.63 & 0.075 & 0.121 & 0.07 & 1.009\ 10 & CFF & 0.59 & 0.023 & 0.037 & 0.11 & 0.958 &&0.60 & 0.128 & 0.208 & 0.31 & 0.955\ 15 & SDF & 0.44 & 0.014 & 0.032 & 0.02 & 1.008 &&0.45 & 0.062 & 0.141 & 0.04 & 1.007\ 15 & ADF$^2$ & 0.44 & 0.014 & 0.033 & 0.02 & 1.007 &&0.45 & 0.067 & 0.153 & 0.05 & 1.007\ 15 & CFI & 0.44 & 0.026 & 0.059 & 0.38 & 1.008 &&0.45 & 0.067 & 0.152 & 0.06 & 1.008\ 15 & CFF & 0.42 & 0.017 & 0.038 & 0.04 & 0.960 &&0.44 & 0.116 & 0.263 & 0.25 & 0.958\ 20 & SDF & 0.35 & 0.012 & 0.036 & 0.01 & 1.008 &&0.35 & 0.059 & 0.171 & 0.04 & 1.008\ 20 & ADF$^2$ & 0.35 & 0.013 & 0.036 & 0.01 & 1.008 &&0.35 & 0.064 & 0.186 & 0.05 & 1.008\ 20 & CFI & 0.35 & 0.025 & 0.071 & 0.44 & 1.008 &&0.35 & 0.064 & 0.186 & 0.05 & 1.009\ 20 & CFF & 0.33 & 0.015 & 0.043 & 0.02 & 0.962 &&0.35 & 0.111 & 0.321 & 0.22 & 0.959\ [^1]: This noise level corresponds to photon noise from a CCD with 40 ke$^-$ full well. For a WFS set up to be running all day, the noise level would be larger when the sun is at low elevation and the image therefore darker. However, it is likely that the performance is then limited by other effects than noise, such as image warping from anisoplanatism.
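As a quick check of the noise level quoted in the footnote (assuming, as an illustration, that the mean subimage signal sits near the full-well level), the relative photon noise is simply 1/sqrt(N):

full_well = 40000.0                     # electrons at full well (40 ke-)
relative_noise = full_well ** -0.5      # photon-noise RMS relative to the signal
print(100 * relative_noise)             # ~0.5 %, the noise RMS used in the text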
WASHINGTON (AP) — House Republicans won a historic political fight to hold Attorney General Eric Holder in contempt of Congress, but the GOP probably is still a long way from obtaining documents it wants in an investigation of a bungled Justice Department gun-tracking operation.

There are two routes to enforcing the contempt citations approved by the House on Thursday, a criminal prosecution and a civil lawsuit, although the White House on Friday virtually shut down the criminal path. The civil route through the courts would not be resolved anytime soon.

White House spokesman Jay Carney said the Justice Department, going back to the administration of President Ronald Reagan, has not pursued prosecutions in contempt cases involving assertions of executive privilege. President Barack Obama invoked a broad form of the privilege to prevent sending department documents to the House Oversight and Government Reform Committee. The chairman, Rep. Darrell Issa, R-Calif., is leading the effort to get the material related to Operation Fast and Furious.

“This is pure politics,” Carney said. “Remarkably the chairman of the committee involved here has asserted that he has no evidence that the attorney general knew of Operation Fast and Furious or did anything but take the right action when he learned of it. “No evidence, so if you have no evidence as he has stated now about the White House and the attorney general, what else could this be but politics?”

More than 100 Democrats walked out of the House chamber to boycott the first of two contempt votes, saying Republicans were more interested in shameful election-year politics than documents. Republicans demanded the documents for an ongoing investigation, but their arguments focused more on the need for closure for the family of slain Border Patrol agent Brian Terry. Two guns from the gun-tracking operation were found near his body after a shootout in Arizona. Democrats promised closure as well, but said a less-partisan Republican investigation was the only way to get it.

Adding to the emotion of the day, the family of the slain agent issued a statement backing the Republicans. “The Terry family takes no pleasure in the contempt vote against Attorney General Eric Holder. Such a vote should not have been necessary. The Justice Department should have released the documents related to Fast and Furious months ago,” the statement said.

It all happened on the day that President Barack Obama’s health care law survived in the Supreme Court, prompting some Democrats to speculate that the votes were scheduled to be overwhelmed by news stories about the ruling. About five hours after the court ruled, with news sites flooded with information about the health care ruling, the House voted 255-67 to declare Holder in criminal contempt. The matter goes to the U.S. attorney for the District of Columbia, who works under Holder. A second vote of 258-95 held Holder in civil contempt and authorized the House to file a lawsuit.

In past cases, courts have been reluctant to settle disputes between the executive and legislative branches of government. The issue became more complicated when Obama invoked a broad form of executive privilege, a legal position that is designed to keep private certain communications of executive branch agencies. Issa’s committee will consult with the House counsel’s office about a court challenge to the administration’s decision not to cooperate, spokesman Frederick Hill said.
The documents were written after Fast and Furious was shut down. The subpoena covered a 10-month period from February 2011, when the Justice Department denied that guns purchased in the U.S. were allowed to “walk” across the border into Mexico, to early December 2011 when the department acknowledged the earlier assertion was in error. Republicans said the contempt citations were necessary because Holder refused to hand over documents that could explain why the Obama administration took 10 months to come clean about gun-walking. The operation identified more than 2,000 illicitly purchased weapons. Some 1,400 of them have yet to be recovered in the failed strategy to track the weapons to gun-running rings.
That awful 3 minutes when I was robbed in Sicily

The highlights included being thrown to the ground by six young Italians who couldn’t manage to kick or punch through my grip on my bag; my wife having her camera bag, a recent Christmas / birthday / graduation / Valentine’s present, ripped off her shoulder; her screaming “Polizia! Polizia!” and her brief but courageous pursuit as our assailants fled; two futile visits to the police, where we learned that most young male delinquents in Catania have protruding ears, which may be significant but not to this story; and the ensuing period of resisting the urge to paint broad strokes of judgment all over Sicily, which would be an even larger injustice than the mugging. Aside from one chunk of ground in Catania, I highly recommend visiting the island.

I’m still puzzled by those three minutes. Aside from the first blow, I don’t remember any physical pain. The strongest memory I retain is the feeling of disbelief towards the events as they unfolded. That something could be taken from me (or, more accurately, something could be taken from my wife and from us) felt so unreal. This thought, along with muscles strengthened by years of playing guitar, may be why I simply refused to let go of my bag. But what did give way under those kicks and punches was my grip on my self-narrative.

We travel and we take. This is true for most travelers. Confession: I enjoy taking, but not as much as I used to. I still like how my thumb magically causes cars to stop, and I still enjoy those warm beds strangers offer me. (Couchsurfing? More like “Here’s the keys to my apartment,” or, “Let me show you around the city, feed you, and give you this nice bed” -surfing.) But the focus changed as I slowly realized these were opportunities to share a piece of life with others. I felt I had reached a place where responding with hospitality isn’t an obligation but a reflex and an opportunity…and then I was beaten and robbed and confused in Catania, Sicily.

I felt the change the next day when we returned to the scene of the crime. The daylight gave the nondescript street innocence. Mothers were hanging laundry and old ladies were returning from grocery shopping, plaid rolling bags in tow. But to me, everything and everyone seemed guilty. Each car that passed was for a split-second the blue getaway car our assailants piled into. I felt fear as teenagers zipped by on mopeds. Unable to shake the role of victim, I let accusation become a salve for helplessness, and I had to fight the urge to view everyone as a potential threat.

The store we had stumbled into the previous night was closed. The shop owners had refused to call the police or help at all. Their eyes had been full of fear and complacency. To some extent I empathize with them, but only because a few times in my life come to mind when I didn’t help those who needed it. That time I was walking to my apartment in Prague and saw a man beating his wife. Or that time in the Republic of Georgia when my co-teacher’s drunken husband kidnapped her at knifepoint in the middle of a 10th-grade English lesson. I don’t excuse the shopkeepers — or myself.

I still feel helpless when I tell this story. Retelling it is easy, almost boring. It happened, it’s part of my life, but I still don’t understand it. I’m still waiting for the “And the moral of the story is…” moment, if it ever comes. I can’t think of a feeling worse than helplessness towards the past.
I’ve chewed over the whole Catania business countless times, and I still don’t know how to approach its memory. But I am rebuilding trust — night is less dark, long walks are regaining their status as God’s gift to mankind, and strangers are less strange. I have to. If I don’t continue using travel as a means to live better in this world full of humans, then a lot more was taken than just a camera.
Makes sense. We only know the altitude of the object is stable, so it is moving in a plane parallel to the surface. The cos(left) assumes we know it is moving parallel to the jet, but this may not be the case, though it seems to be if you look at the movement of the sea compared to the movement of the object. Note that the jet is only flying straight and level with the ATFLIR locked for just 4-5 seconds (from 1:35-1:39 in the 'official' video). Most of the time it is banking left, which would considerably tilt the reference plane of the ATFLIR. I cannot immediately see whether you took this effect into account, just a heads-up... Click to expand...

Thanks Kaen. I'm eager to include the left bank if someone wants to pull the roll angles for me as a function of time. Here is the image that was broken in my previous post. Thanks @Mick West for the instructions. The blue line is the plane flying straight and level looking down and to the left at the object.

James Thorpe on FB noted a different way of calculating the position of the target, assuming it is not moving. The black triangle is in the horizontal plane. Distances are in meters. A to B is the movement of the plane between when the angle is 43 and when it's 57. The other angle at B is 180-57 = 123. The last angle (C) in the black triangle is hence (180-123-43) = 14. The lengths of the other two sides can be calculated as 3610/sin(14 degrees)*sin(43 degrees) = 10177 and 3610/sin(14 degrees)*sin(123 degrees) = 12514. However, this puts the range at 13.9 km = 7.5 NM, when the "RNG" is reading 4.4 NM.

The sensor is 640x480, but the video may only show a square 480x480 part of it. We've debated whether the narrow (NAR) FOV is 0.7 degrees, 1 degree, or 1.5 degrees based on several sources. Click to expand...

Thank you for that. I read the brochure and some of the others posted here. Mostly they're over my head. I'm looking for the simple ATFLIR camera numbers that I can plug into a "3ds Max Camera" in order to recreate the scene in 3D: Obviously, these elementary details are not available to the public. If they were I guess the dizzying array of math calculations here wouldn't be quite as necessary. I say that with the utmost respect. In the meantime... fun to look at but perhaps a futile effort (someone on Reddit apparently took the obvious road and tweeted at Raytheon asking for the specs of the camera): I stabilized the better-resolution WAPO version of the video:

I think I see what is going on here once I realized that the blue triangle is slanted down toward the object. Looks like they too are assuming level flight (easiest assumption). Any path connecting the 13,923 line with the 12,275 line will represent a trajectory consistent with the angular measurements. So put a point on the 13,923 line 8150 meters [4.4 nautical miles] from A, place another 6300 meters [3.4 nautical miles] from B on the blue line labeled 12,275, and connect the two new points with a line. The red line is a path that is consistent with the angles and distances.
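As a quick sanity check of the triangle above (law of sines, using the 3610 m baseline and the 43/57 degree bearings), here is a small Python sketch. It only reproduces the two side lengths quoted in the post; the nautical-mile conversion at the end is illustrative.

import math

baseline = 3610.0                       # metres travelled from A to B
a_deg = 43.0                            # interior angle at A
b_deg = 180.0 - 57.0                    # interior angle at B (123 degrees)
c_deg = 180.0 - a_deg - b_deg           # apex angle at the target (14 degrees)

k = baseline / math.sin(math.radians(c_deg))
dist_from_B = k * math.sin(math.radians(a_deg))   # side opposite A, ~10177 m
dist_from_A = k * math.sin(math.radians(b_deg))   # side opposite B, ~12514 m (post's value)

print(c_deg, round(dist_from_B), round(dist_from_A), round(dist_from_A / 1852.0, 2))
# last number: straight-line distance from A expressed in nautical miles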
The maximum documented speed for an Albatross I found is 104 mph, stated in this study. Measurements of one grey-headed albatross during an Antarctic storm showed that the bird travelled for 9 h at ground speeds of between 110 kph (30.5 m s−1) and 168 kph (46.7 m s−1). Content from external source 168 kph = 104 mph. The Albatross is not native to the North American East Coast. However, the ranges of long-distance fliers are quite variable and accordingly there are dozens of sightings of Albatross along the East Coast, including at least one shot by a hunter. This study catalogues 171 observed Albatross, 13 in Florida: While rare, I suspect Albatross are much more common than extraterrestrial aircraft. The Albatross flies in what's called dynamic soaring, depicted here: So the question arises if the object in the Go-Fast video could be an Albatross based on its flight path. The question would be if during a 30-second snapshot an Albatross could maintain a fairly straight flightpath. I suspect the answer is yes.

The dynamic soaring pattern in the graph is close to the water, not two miles high. Do albatrosses fly high? It could be another bird, though I'm not convinced that a bird would look colder than water in IR.

I think the temperature could well be lower than the surface water, given that the bird can fly as fast as 100 mph and would be air-cooled. However, it would need to fly in a straight line for at least 30 seconds, which should not be impossible for it to do. The relatively high altitude of 2 miles seems like the harder part to explain.

To tie this to the Nimitz video, notice how the WSO was able to manually slew the camera and acquire the target after a couple of tries.
This should've been trivial to do in the Nimitz video when it broke lock at the end. The dynamic soaring pattern in the graph is close to the water, not two miles high. Do albatrosses fly high? It could be another bird, though I'm not convinced that a bird would look colder than water in IR. To tie this to the Nimitz video, notice how the WSO was able to manually slew the camera and acquire the target after a couple of tries. This should've been trivial to do in the Nimitz video when it broke lock at the end. Click to expand... In the last part of the Nimitz video a range indication pops up: 99.9 RNG 99. I wonder what that means. Maybe the WSO tries a radar lock but the radar basically indicates 'out of range'? In the GO FAST video the ATFLIR lock is immediately accompanied by a range and closure speed indication. This could mean the radar first locks on to the target and the ATFLIR follows the radar's directions? The dynamic soaring pattern in the graph is close to the water, not two miles high. Do albatrosses fly high? It could be another bird, though I'm not convinced that a bird would look colder than water in IR. Click to expand... JFDee posted this footage as an attachment, which I've converted to a GIF animation: The gull is cooler than land, but warmer than sky. If the ocean was reflecting the sky's temperature, you're probably right. So, imo, the next non-extraordinary option to consider is a weather balloon recently released, and so still climbing to its upper altitude. A balloon also fits the shape of the thermal signature. Take for example: I can confirm that the left bank will reduce the speed of the object. I cranked in a guess of 1 m/s^2 left turn and reduced the object speed to 50 kts. Click to expand... I also found a huge effect from adding in the left turn on the two point analysis. Here's the plane's flight in a straight line (green), and the same distance with a very slight left turn (red). The two sight lines at 43° and 57° are shown. The initial 43° is unchanged, but the the 57° changes significantly. The blue line (F-G) shows the horizontal path of the target assuming straight line of the jet With the slight left turn, this now becomes the very different short orange line. This animation shows the effect of varying the turn rate on the calculated speed. The blue line is the simple two point analysis with jet traveling in a straight line. The orange line is with a variety of turn rates from the jet. The minimum is around 270m in 19 seconds, about 28 knots, 32mph. The bank angle of the jet varies though. So a more sophisticated analysis might reveal more. Hi everyone. I just joined after reading up on this subject after becoming extremely skeptical of Tom DeLonge's "To The Stars" companies and their intentions, and the implication of Helene Cooper, Ralph Blumenthal and Leslie Kean (of the New York Times) in what appears to be promotion of Tom's business venture. I don't have much to add outside of the great work already done by everyone else, just came to say I sent in a FOIA request to the Defense Intelligence Agency asking for the full videos being used by the New York Times and Tom DeLonge's companies. As others have said, I suspect these are routine training videos where young pilots are testing target lock systems. 
You can take almost any video like this and splice it to seem like the pilots "don't know what they targeted" and are "extra shocked" because often they don't unless they get closer, and often are excited they achieved a target lock on something as stupid as a bird. Imagine your reaction if you, a new pilot flying a fighter jet, were able to get a multimillion dollar laser target lock device to lock onto an albatross or weather balloon. If the objects in these videos were perceived as threats or truly unidentified objects the size of an aircraft, they wouldn't be sending young pilots out on training missions, un-armed, to intercept them. And they certainly wouldn't be de-classifying and releasing the videos. Add to that the fact that these videos allegedly came out of an agency which was nothing but a pork project for US Sen. Harry Reid to give his friend and fellow UFO enthusiast, Robert Bigelow, a boatload of cash. The whole concept is just ridiculous. Hi everyone. I just joined after reading up on this subject after becoming extremely skeptical of Tom DeLonge's "To The Stars" companies and their intentions, and the implication of Helene Cooper, Ralph Blumenthal and Leslie Kean (of the New York Times) in what appears to be promotion of Tom's business venture. I don't have much to add outside of the great work already done by everyone else, just came to say I sent in a FOIA request to the Defense Intelligence Agency asking for the full videos being used by the New York Times and Tom DeLonge's companies. Click to expand... You need not bother filing any request. As I just wrote on my Bad UFOs Blog, dozens of such requests have already been filed by UFO researchers and by reporters. Nobody has turned up anything so far, they are all coming back "no records." A balloon seems plausible. But I would expect a weather ballon to be climbing more. Still, there's lots of different types of balloon. Click to expand... From a graph in this study it looks like with no wind a 1-meter radius helium weather balloon @ 10,000 ft would ascend around 383 ft over 30 seconds or if it was hydrogen filled would ascend around 433 ft over 30 seconds. The fact that the study says, "We will assume throughout this paper that there is no wind, so that the balloon velocity is vertical," implies that wind can have a considerable impact on ascent rate. I'd presume that impact would be to reduce the ascent rate. Looking at the sea surface in the Go Fast video, I'd say there's significant wind occurring. The range relevant to our analysis is, I believe (if I follow, the Go Fast object is around 10,000 ft), at the edge of the uncontrolled-ascent phase, zone 1. The ascent during 30 seconds with high winds might be trivial or even too small to detect on the ATFLIR. To confidently recreate the scene in 3D I really am going to need that cmos/ccd image sensor size. Here is the first stage at setting up the 3D. For now the camera is just perpendicular to the ocean surface and I started with plugging in 1050mm for the focal length, you can see the camera settings highlighted in red. Also, I saw the RNG in some of the math equations, can someone explain what it is? And what is the Vc? Also, I saw the RNG in some of the math equations, can someone explain what it is? And what is the Vc? Click to expand... RNG is the distance to the target, presumably in nautical miles Vc is the closing velocity, presumably in knots. i.e. the component of the relative velocity of the object parallel to the line of sight. 
Here is the first stage at setting up the 3D. For now the camera is just perpendicular to the ocean surface and I started with plugging in 1050mm for the focal length, you can see the camera settings highlighted in red. RNG is the distance to the target, presumably in nautical miles Vc is the closing velocity, presumably in knots. i.e. the component of the relative velocity of the object parallel to the line of sight. The fact that the study says, "We will assume throughout this paper that there is no wind, so that the balloon velocity is vertical," implies that wind can have a considerable impact on ascent rate. Click to expand... No I don't think that is what is implied. A typical ascent rate would be around 5 m/s although it can be adjusted by the amount of gas it is filled with. In strong winds you might want to overfill it so that it does not disappear behind a mountain, for example. The ascent rate is independent of wind speed, although its actual velocity is the vector sum of the vertical ascent rate and horizontal wind velocity. No I don't think that is what is implied. A typical ascent rate would be around 5 m/s although it can be adjusted by the amount of gas it is filled with. In strong winds you might want to overfill it so that it does not disappear behind a mountain, for example. The ascent rate is independent of wind speed, although its actual velocity is the vector sum of the vertical ascent rate and horizontal wind velocity. Gas released from a pressurized tank can be freezing cold. I wonder if that would cause the gas in a recently filled weather balloon to be markedly colder than the surrounding atmosphere, like Go Fast. In a quick search I didn't find such a weather balloon imaged on FLIR.
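To put rough numbers on the ascent-rate point above, here is a small sketch. The 5 m/s ascent rate is the typical figure mentioned in the thread; the 20 m/s horizontal wind is an arbitrary assumption for illustration, not a value derived from the video.

ascent_rate = 5.0                     # m/s, typical weather-balloon ascent rate quoted above
wind_speed = 20.0                     # m/s horizontal wind (assumed for illustration)
clip = 30.0                           # seconds of usable video

climb_m = ascent_rate * clip          # 150 m gained during the clip
climb_ft = climb_m / 0.3048           # ~492 ft
total_speed = (ascent_rate ** 2 + wind_speed ** 2) ** 0.5   # vector sum, ~20.6 m/s

print(climb_m, round(climb_ft), round(total_speed, 1))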
Let h = 2647/26 + -102. Is w <= h? False Let u be 10*(0 + (-6)/15). Let v be -1589 - u/(6/(-3)). Let m = -6413/4 - v. Which is bigger: m or -11? -11 Let o = 6/24617 - 1477782/3126359. Which is smaller: 0 or o? o Suppose b = 45 + 89. Let r = b + -124. Is 9 less than r? True Let i = 31755 + -32417. Which is smaller: i or -648? i Suppose 56*j = 53*j - 9, z - 33 = j. Is 0.039 at most as big as z? True Let o(h) = h**2 - 5*h + 8. Let r be o(3). Suppose 2*k - r = 3*k. Let y be ((-2559)/(-639) + -4)*k. Which is bigger: -1 or y? y Let x be 18/(-24) - (-25 + 15). Which is smaller: x or 4? 4 Suppose 10*u = 36*u - 12350. Let j = 492 - u. Is j greater than or equal to 125/7? False Let g = -26.18 - 61.73. Let j = 88 + g. Which is smaller: j or -0.18? -0.18 Let t be ((11/33)/(9 + -8))/((-98)/78). Which is smaller: t or -1? -1 Let n be (-58*(-1 - -2))/(486/(-6075)). Is 726 != n? True Let i = -31.03 + 30.9379. Is -2/3 at most i? True Let i(p) = -10*p**2 + 402*p - 150. Let c be i(40). Which is greater: c or 2/11? 2/11 Let k = 48510 - 84728. Is k greater than -36217? False Suppose -5*w - 35 = 0, -4*x - 2*w - 472 - 294 = 0. Let r be ((-566)/5)/(3/5). Which is smaller: x or r? r Let b = 27434/3 + -9064. Let t(m) = m**2 + 44*m + 260. Let r be t(-10). Let f = r + b. Is f < 1? True Let t(a) = -a**3 + 44*a**2 + 87*a + 215. Let q be t(46). Which is greater: -21 or q? q Let v = 87 + -87. Suppose 15*p = 8*p + 28. Let c be -10*(132/(-32) + p). Is c > v? True Let n = -110 + 115. Let j be (4/10)/((-8)/n). Is j less than -2/3? False Let m be 16/12*(3 + (-298917)/56937) + 3. Do -1 and m have the same value? False Let t(h) = 45*h**2 - 625*h - 27. Let o be t(12). Is o greater than -1047? False Let z = 65874.1 + -65874. Let f = 7333/752 + -1/752. Which is bigger: z or f? f Let z be (-2)/(-3)*(-206 - 37). Let g = 120 + z. Let y be (0 + 1)/(1/(-43)). Which is greater: g or y? g Let p(a) = -4*a + 16. Suppose 2*n - 4*i = 0, n - 1 = 3*i - 0*i. Let u = 3 - n. Let x be p(u). Is -5/2 greater than x? True Let r = -0.121 + -118.879. Let v = r - -118. Which is smaller: -122 or v? -122 Let x = 2 - 2. Let f = 412794/14245 + -1072/37. Is f at least as big as x? True Suppose 367 = 4*a - d - 737, -5*a - 4*d = -1401. Suppose 8*s - 419 - a = 0. Are 86 and s equal? False Let m be 53/(-106)*11876/87138. Let d = m + 4/927. Which is smaller: 0 or d? d Let z be (0 - -1) + (29 - (-493)/(-17)). Is z not equal to 33/112? True Let l = -13.08 + -1.62. Let t = -14.6 - l. Which is greater: -55 or t? t Let r = -11 + 12. Let s be -21 + 2/r - (1 - 3). Suppose a + 2*a - 5*z + 71 = 0, 0 = 2*z - 8. Is s bigger than a? False Let p = -145 + 144. Let q = 5/21 - 4/7. Is q at least as big as p? True Suppose 175*m = 340356 + 432911 - 129792. Is m at least 3677? True Suppose -279*f = -273*f + 36. Let l(m) = -9*m - 40. Let h(y) = -8*y - 41. Let t(p) = 5*h(p) - 4*l(p). Let r be t(f). Is r smaller than 1? True Suppose 4*u - 4 = 4*m, 0 = -m + 3*m + 3*u - 8. Let i be 10/(m - (-2)/(-4)). Let k = 29061 - 29042. Do i and k have different values? True Let x be (-5)/(-1) + -1 + 753/(-188). Let y = -11 - -11. Do y and x have different values? True Let u(s) = 5*s + 16. Let f be u(4). Let j be (f/12)/((-2)/(-36)). Suppose -12*z + 3*z - j = 0. Which is smaller: z or -0.1? z Let w = -37.473 - -37.7. Let b = w - -1.773. Is 17 at least b? True Let l = -39.7841 - 0.1659. Let u = -40 - l. Let d = -5.009 + 0.009. Which is smaller: u or d? d Let a = -71.72 - -72. Let k = 0.5 - a. Which is greater: 0.2 or k? k Let h = -46416493/49784 + -108/6223. Let s = h - -932. Is 19 <= s? 
False Let d(x) = x**2 - 4*x - 1. Let p be (-4)/22 + (-184)/(-44). Let z be d(p). Let o be (-1 + (-2 - z))*37/185. Which is bigger: o or -3/8? -3/8 Suppose -2*c = -238 - 410. Let x be (-108)/c + ((-860)/6)/2. Is x greater than -72? False Let f(l) = 33*l + 1653. Let o be f(-65). Is 2/5 <= o? False Let k(o) = -o**2 - 54*o - 433. Let i be k(-41). Which is smaller: i or 599/6? 599/6 Suppose -3*f = 5*l + 4188, -l - 1455 + 623 = -5*f. Which is smaller: 20 or l? l Suppose -5*v + 40 = 2*g, 0*v + 5*g = -2*v + 37. Let l be (18/(-8))/(v/16). Which is greater: l or -34/7? -34/7 Let h = 221 + -74. Let y be h/70 - 9/6. Which is bigger: 66 or y? 66 Let w(s) = -3*s + 25. Let u be w(8). Let l be 1362/1092 - 4/14 - u. Let q = 50 - 49. Is q at most as big as l? False Let y = 321 - 314. Suppose -7 = y*d + 98. Does -2/5 = d? False Let x(q) = -q - 8*q + 15*q - 9. Let v be x(3). Suppose -u - 4*c + v = -12, 5*c = 3*u + 22. Is u <= 19? True Let k = -24.63 + 3.95. Let n = k - -22.9. Let l = 0.22 - n. Does l = -0.1? False Suppose -83 = 26*t - 291. Let b(z) = z**3 - 10*z**2 + 12*z - 33. Let r be b(t). Which is smaller: r or -67? -67 Suppose x = 4*p - 2 - 6, 2*p - 4 = 3*x. Let i be ((-40)/(-140))/(14 + -307). Is i smaller than x? True Let o = -1743 - -1725. Which is bigger: o or -63? o Let o be 40/35 + 2 + (-336848)/56. Is -6009 at most as big as o? False Suppose -2*u - 635 = s - 583, 0 = 4*s - 5*u + 169. Which is smaller: 98 or s? s Suppose -n + 0*n = -4*c - 32, -8 = 2*n + c. Let h be 78/145 + (-6)/15. Which is smaller: n or h? n Suppose -24 = -2*x + 8. Suppose 3*h + 20 = -a + 6*a, 4*h + 4*a - x = 0. Suppose -3*i + h = 3. Is i > 1/38? False Suppose -25*n + 2*n = -1380. Suppose 5*b = -5*i - n, 9*b + i + 25 = 7*b. Is b greater than or equal to -1? False Let j be -3642*((-4)/4)/6. Let v = j - 526. Is 78 greater than v? False Let c = -553/1980 - -5/396. Which is bigger: -75 or c? c Let q(g) = 286*g**3 + g**2 + g - 1. Let o be q(-1). Let i = o + 288. Which is smaller: i or 0.088? 0.088 Let p = 4964 + -4977. Let a = -2.02 + 0.02. Let v = -1.7 - a. Does v = p? False Let t = 107 - 5353/50. Let s be -13 - (4 + -17)*1. Is t != s? True Let o(i) = 24*i**3 + 7*i**2 + 119*i - 143. Let h be o(10). Which is greater: h or 25748? 25748 Suppose l - 2 = 417*z - 416*z, z = -4*l - 7. Which is smaller: z or -1212? -1212 Let n be 66/14 + -1 + 42/147. Suppose -3*g = -5*c - 9 + 26, -n*c + 21 = 5*g. Suppose 44 = -c*l - 64. Is -26 at most as big as l? False Let a = -0.13 - 7.87. Let c be (-34072)/237120*-3 - (-9)/24. Let y = 3/494 - c. Is a at least y? False Let w(b) = b**2 + 19*b - 202. Let i be w(-34). Let k = 290 - i. Which is bigger: -17 or k? -17 Let c = -4041 + 4022. Let f be (-6 + 4)/(2/6). Let n(u) = -u**2 - 3*u + 1. Let t be n(f). Which is greater: t or c? t Let x be (-126)/1431*-7 + (-4)/12. Is x > 1? False Let m = 4506 - 4510. Is m bigger than -8/13? False Let i = 269 + -266. Suppose -i*u - 87 + 333 = 0. Is u at least 77? True Let n = 7/128013 - 1036137299/1408143. Which is smaller: -737 or n? -737 Let q(m) = 4*m**3 + 221*m**2 + 107*m + 174. Let h be q(-55). Is h not equal to -2686? False Suppose 44 = 55*g - 51*g. Let y = -1 + 6.1. Let s = 0.1 - y. Which is greater: g or s? g Let l(m) = 112 - 8*m - 9*m - 163. Let x be l(-6). Is 0.3 at most x? True Let n = -1954235/2101344 - -2811/3304. Let b = 7/96 + n. Which is bigger: b or 1? 1 Let c = 306.02 + 82.98. Which is smaller: -7 or c? -7 Let h = 33629/4425 + 1/4425. Which is smaller: 11 or h? h Suppose -5*s + 10 = 2*i + 3*i, 3*s - 16 = 2*i. Suppose -4*t + k - 1 = 0, -k + 5*k = -t + s. 
Let n be 3 - 0 - (-32)/(-10) - t. Which is greater: n or 1? 1 Let o(s) = s**2 + 6. Let k be o(0). Let r be k/15 - (-1089)/15 - 2. Is r bigger than 71? False Let t be (-1)/(525/33) + -5 + (-288)/(-56). Is 48 less than or equal to t? False Let k = -97 + -128. Suppose 0 = -28*r + 32*r + 2*i + 892, -4*r = 6*i + 876. Is r >= k? True Suppose 2*l + 16 = -78. Let b = 27692 + -27740. Is l < b? False Let l = -4.091 - 0.169. Let w = 5.26 + l. Is 260 >= w? True Let u = -737.09 - -16.09. Let o = u - -720.8. Is -72 at most as big as o? True Suppose -3*t + 20 = -4*i, 5*t - 13 = i + 9. Suppose 5*p - 3*m = m + 30, 3*m + 7 = -t*p. Suppose -p - 10 = 12*q. Which is smaller: q or 0.104? q Let d = 7.47 + -7.8. Let b = 2.1 + -3.97. Let j = b + d. Do 1 and j have the same value? False Suppose 484 - 199 = 19*u. Suppose 79 = u*o + 64. Let h be -1 - ((-36)/(-34))/(-1). Which is smaller: h or o? h Let c = -2585 + 2587. Which is greater: c or -1.802? c Let c(q) = q**3 - 7*q**2 - 10. Let t be c(7). Let m(s) = -s**3 - 5*s**2 + 21*s - 8. Let i be m(-8). Let o be (i/10 + -2)*t. Which is greater: 9 or o? 9 Let y be (-
Buy A Second-hand Car Without Regretting It Are you interested in buying a second-hand motor but worried about getting a raw deal? Then read on. There are many steps you can take to reduce the chances of getting ripped off, including asking a more experienced person to guide you through the whole process. Buying from a dealer rather than a private seller is also a good move as you’ll get protection from the Sale of Goods Act and should find it easy to return the car if you later discover a fault you weren’t told about before. Finding used cars for sale in Birmingham is easier than you might think. Seller or Dealer? One of the first things you should do is work out how much you can afford to spend. You may well wish to draw up a list of four or five suitable models, find out how much they usually go for and then start looking around the forecourts. Don’t be swayed by the price tag alone as you’ll need to cover tax, insurance and running costs as well as the price of the car. Ask as many questions as you feel you need to. Most dealers belong to trade networks and risk being removed from them if they fail to treat you fairly, so it will always be in their best interests to co-operate with you if you find a problem. A Full Service History Only view a car in dry and light conditions where faults cannot be hidden. Make sure you conduct an emergency stop when taking the car for a test drive so you can see how it performs in dangerous scenarios. Look for evidence of a full service history and ensure the information printed on the paperwork matches up with what you have been told.
module pressure_mod
  use task_util_mod
  implicit none
contains

  ! Non-blocking receives before blocking sends
  subroutine pressure(ncrms)
    ! Original pressure solver based on horizontal slabs
    ! (C) 1998, 2002 Marat Khairoutdinov
    ! Works only when the number of slabs is equal to the number of processors.
    ! Therefore, the number of processors shouldn't exceed the number of levels nzm
    ! Also, used for a 2D version
    ! For more processors for the given number of levels and 3D, use pressure_big
    use vars
    use params, only: dowallx, dowally, crm_rknd
    use press_rhs_mod
    use press_grad_mod
    use fft_mod
    use openacc_utils
    implicit none
    integer, intent(in) :: ncrms
    integer, parameter :: npressureslabs = nsubdomains
    integer, parameter :: nzslab = max(1,nzm / npressureslabs)
    integer, parameter :: nx2=nx_gl+2, ny2=ny_gl+2*YES3D
    integer, parameter :: n3i=3*nx_gl/2+1,n3j=3*ny_gl/2+1
    real(crm_rknd) work(nx2,ny2)
    real(crm_rknd) ftmp(nx2,ny2)
    real(crm_rknd) ftmp_x(nx2)
    real(crm_rknd) ftmp_y(ny2)
    real(8) b,e
    real(8) xi,xj,xnx,xny,ddx2,ddy2,pii,factx,facty
    real(8) alfa(nzm-1),beta(nzm-1)
    integer i, j, k, id, jd, m, n, it, jt, ii, jj, icrm
    integer nyp22
    real(crm_rknd), allocatable :: f (:,:,:,:)   ! global rhs and array for FFT coefficients
    real(crm_rknd), allocatable :: ff(:,:,:,:)   ! local (subdomain's) version of f
    integer       , allocatable :: iii(:)
    integer       , allocatable :: jjj(:)
    integer       , allocatable :: ifaxi(:)
    integer       , allocatable :: ifaxj(:)
    real(crm_rknd), allocatable :: trigxi(:)
    real(crm_rknd), allocatable :: trigxj(:)
    real(8)       , allocatable :: a(:,:)
    real(8)       , allocatable :: c(:,:)
    integer iwall,jwall
    integer :: numgangs  ! For working around PGI OpenACC bug where it didn't create enough gangs
    real(8), allocatable :: eign(:,:)

    allocate( f (ncrms,nx2,ny2,nzslab) )
    allocate( ff(ncrms,nx+1,ny+2*YES3D,nzm) )
    allocate( iii(0:nx_gl) )
    allocate( jjj(0:ny_gl) )
    allocate( ifaxi(100) )
    allocate( ifaxj(100) )
    allocate( trigxi(n3i) )
    allocate( trigxj(n3j) )
    allocate( a(ncrms,nzm) )
    allocate( c(ncrms,nzm) )
    call prefetch( f )
    call prefetch( ff )
    call prefetch( iii )
    call prefetch( jjj )
    call prefetch( ifaxi )
    call prefetch( ifaxj )
    call prefetch( trigxi )
    call prefetch( trigxj )
    call prefetch( a )
    call prefetch( c )

    it = 0
    jt = 0

    !-----------------------------------------------------------------
    if(dowallx) then
      iwall=1
    else
      iwall=0
    endif
    if(RUN2D) then
      nyp22=1
      jwall=0
    else
      nyp22=nyp2
      if(dowally) then
        jwall=2
      else
        jwall=0
      endif
    endif

    allocate(eign(nxp1-iwall,nyp22-jwall))
    call prefetch(eign)

    !-----------------------------------------------------------------
    ! Compute the r.h.s. of the Poisson equation for pressure
    call press_rhs(ncrms)

    !-----------------------------------------------------------------
    ! Form the horizontal slabs of right-hand-sides of Poisson equation
    n = 0
    !$acc parallel loop collapse(4) async(asyncid)
    do k = 1,nzslab
      do j = 1,ny
        do i = 1,nx
          do icrm = 1 , ncrms
            f(icrm,i,j,k) = p(icrm,i,j,k)
          enddo
        enddo
      enddo
    enddo

    !-------------------------------------------------
    ! Perform Fourier transformation for a slab:
    !$acc parallel loop async(asyncid)
    do icrm = 1 , 1
      call fftfax_crm(nx_gl,ifaxi,trigxi)
      if(RUN3D) call fftfax_crm(ny_gl,ifaxj,trigxj)
    enddo

    !$acc parallel loop gang vector collapse(3) private(work,ftmp_x) async(asyncid)
    do k=1,nzslab
      do j = 1 , ny_gl
        do icrm = 1 , ncrms
          !$acc cache(ftmp_x,work)
          ftmp_x = f(icrm,:,j,k)
          call fft991_crm(ftmp_x,work,trigxi,ifaxi,1,nx2,nx_gl,1,-1)
          f(icrm,:,j,k) = ftmp_x
        enddo
      enddo
    enddo

    if(RUN3D) then
      !$acc parallel loop gang vector collapse(3) private(work,ftmp_y) async(asyncid)
      do k=1,nzslab
        do i = 1 , nx_gl+1
          do icrm = 1 , ncrms
            !$acc cache(ftmp_y,work)
            ftmp_y = f(icrm,i,:,k)
            call fft991_crm(ftmp_y,work,trigxj,ifaxj,1,nx2,ny_gl,1,-1)
            f(icrm,i,:,k) = ftmp_y
          enddo
        enddo
      enddo
    endif

    !-------------------------------------------------
    ! Send Fourier coefficients back to subdomains:
    !$acc parallel loop collapse(4) async(asyncid)
    do k = 1,nzslab
      do j = 1,nyp22-jwall
        do i = 1,nxp1-iwall
          do icrm = 1 , ncrms
            ff(icrm,i,j,k) = f(icrm,i,j,k)
          enddo
        enddo
      enddo
    enddo

    !-------------------------------------------------
    ! Solve the tri-diagonal system for Fourier coefficients
    ! in the vertical for each subdomain:
    !$acc parallel loop collapse(2) async(asyncid)
    do k=1,nzm
      do icrm = 1 , ncrms
        a(icrm,k)=rhow(icrm,k)/(adz(icrm,k)*adzw(icrm,k)*dz(icrm)*dz(icrm))
        c(icrm,k)=rhow(icrm,k+1)/(adz(icrm,k)*adzw(icrm,k+1)*dz(icrm)*dz(icrm))
      enddo
    enddo

    !$acc parallel loop collapse(2) async(asyncid)
    do j=1,nyp22-jwall
      do i=1,nxp1-iwall
        ddx2=1._8/(dx*dx)
        ddy2=1._8/(dy*dy)
        pii = 3.14159265358979323846D0
        xnx=pii/nx_gl
        xny=pii/ny_gl
        if(dowally) then
          jd=j+jt-1
          facty = 1.d0
        else
          jd=(j+jt-0.1)/2.
          facty = 2.d0
        endif
        xj=jd
        if(dowallx) then
          id=i+it-1
          factx = 1.d0
        else
          id=(i+it-0.1)/2.
          factx = 2.d0
        endif
        xi=id
        eign(i,j)=(2._8*cos(factx*xnx*xi)-2._8)*ddx2+(2._8*cos(facty*xny*xj)-2._8)*ddy2
      enddo
    enddo

    ! For working around PGI OpenACC bug where it didn't create enough gangs
    numgangs = ceiling(ncrms*(nyp22-jwall)*(nxp2-iwall)/128.)

    !$acc parallel loop gang vector collapse(3) vector_length(128) num_gangs(numgangs) private(alfa,beta) async(asyncid)
    do j=1,nyp22-jwall
      do i=1,nxp1-iwall
        do icrm = 1 , ncrms
          !$acc cache(alfa,beta)
          if(dowally) then
            jd=j+jt-1
          else
            jd=(j+jt-0.1)/2.
          endif
          if(dowallx) then
            id=i+it-1
          else
            id=(i+it-0.1)/2.
          endif
          if(id+jd.eq.0) then
            b=1._8/(eign(i,j)*rho(icrm,1)-a(icrm,1)-c(icrm,1))
            alfa(1)=-c(icrm,1)*b
            beta(1)=ff(icrm,i,j,1)*b
          else
            b=1._8/(eign(i,j)*rho(icrm,1)-c(icrm,1))
            alfa(1)=-c(icrm,1)*b
            beta(1)=ff(icrm,i,j,1)*b
          endif
          do k=2,nzm-1
            e=1._8/(eign(i,j)*rho(icrm,k)-a(icrm,k)-c(icrm,k)+a(icrm,k)*alfa(k-1))
            alfa(k)=-c(icrm,k)*e
            beta(k)=(ff(icrm,i,j,k)-a(icrm,k)*beta(k-1))*e
          enddo
          ff(icrm,i,j,nzm)=(ff(icrm,i,j,nzm)-a(icrm,nzm)*beta(nzm-1))/(eign(i,j)*rho(icrm,nzm)-a(icrm,nzm)+a(icrm,nzm)*alfa(nzm-1))
          do k=nzm-1,1,-1
            ff(icrm,i,j,k)=alfa(k)*ff(icrm,i,j,k+1)+beta(k)
          enddo
        enddo
      enddo
    enddo

    !-----------------------------------------------------------------
    n = 0
    !$acc parallel loop collapse(4) async(asyncid)
    do k = 1,nzslab
      do j = 1,nyp22-jwall
        do i = 1,nxp1-iwall
          do icrm = 1 , ncrms
            f(icrm,i,j,k) = ff(icrm,i,j,k)
          enddo
        enddo
      enddo
    enddo

    !-------------------------------------------------
    ! Perform inverse Fourier transformation:
    if(RUN3D) then
      !$acc parallel loop gang vector collapse(3) private(ftmp_y,work) async(asyncid)
      do k=1,nzslab
        do i = 1 , nx_gl+1
          do icrm = 1 , ncrms
            !$acc cache(ftmp_y,work)
            ftmp_y = f(icrm,i,:,k)
            call fft991_crm(ftmp_y,work,trigxj,ifaxj,1,nx2,ny_gl,1,+1)
            f(icrm,i,:,k) = ftmp_y
          enddo
        enddo
      enddo
    endif

    !$acc parallel loop gang vector collapse(3) private(ftmp_x,work) async(asyncid)
    do k=1,nzslab
      do j = 1 , ny_gl
        do icrm = 1 , ncrms
          !$acc cache(ftmp_x,work)
          ftmp_x = f(icrm,:,j,k)
          call fft991_crm(ftmp_x,work,trigxi,ifaxi,1,nx2,nx_gl,1,+1)
          f(icrm,:,j,k) = ftmp_x
        enddo
      enddo
    enddo

    !-----------------------------------------------------------------
    ! Fill the pressure field for each subdomain:
    !$acc parallel loop async(asyncid)
    do icrm = 1,1
      do i=1,nx_gl
        iii(i)=i
      enddo
      iii(0)=nx_gl
      do j=1,ny_gl
        jjj(j)=j
      enddo
      jjj(0)=ny_gl
    enddo

    n = 0
    !$acc parallel loop collapse(4) async(asyncid)
    do k = 1,nzslab
      do j = 1-YES3D,ny
        do i = 0,nx
          do icrm = 1 , ncrms
            jj=jjj(j)
            ii=iii(i)
            p(icrm,i,j,k) = f(icrm,ii,jj,k)
          enddo
        enddo
      enddo
    enddo

    ! Add pressure gradient term to the rhs of the momentum equation:
    call press_grad(ncrms)

    deallocate(eign)
    deallocate( f )
    deallocate( ff )
    deallocate( iii )
    deallocate( jjj )
    deallocate( ifaxi )
    deallocate( ifaxj )
    deallocate( trigxi )
    deallocate( trigxj )
    deallocate( a )
    deallocate( c )

  end subroutine pressure

end module pressure_mod
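For readers unfamiliar with the solver above: after the horizontal FFTs, each Fourier mode reduces to a tridiagonal system in the vertical, which the alfa/beta sweep solves by forward elimination and back substitution (the Thomas algorithm). The Python sketch below is an illustration only; the generic coefficients a, b, c and right-hand side r stand in for the rho/adz/adzw/eign factors of the Fortran, and the function name thomas and the test vectors are invented for the example.

def thomas(a, b, c, r):
    """Solve a tridiagonal system a[k]*x[k-1] + b[k]*x[k] + c[k]*x[k+1] = r[k].

    a[0] and c[-1] are ignored.  Mirrors the alfa/beta recurrence in the
    Fortran above, but with generic coefficients.
    """
    n = len(b)
    alfa = [0.0] * n
    beta = [0.0] * n
    # forward elimination: express x[k] = alfa[k]*x[k+1] + beta[k]
    alfa[0] = -c[0] / b[0]
    beta[0] = r[0] / b[0]
    for k in range(1, n):
        denom = b[k] + a[k] * alfa[k - 1]
        alfa[k] = -c[k] / denom if k < n - 1 else 0.0
        beta[k] = (r[k] - a[k] * beta[k - 1]) / denom
    # back substitution
    x = [0.0] * n
    x[-1] = beta[-1]
    for k in range(n - 2, -1, -1):
        x[k] = alfa[k] * x[k + 1] + beta[k]
    return x

# quick check against a known solution of a small system
a = [0.0, 1.0, 1.0, 1.0]
b = [-2.0, -2.0, -2.0, -2.0]
c = [1.0, 1.0, 1.0, 0.0]
x_true = [1.0, 2.0, 3.0, 4.0]
r = [b[0]*x_true[0] + c[0]*x_true[1],
     a[1]*x_true[0] + b[1]*x_true[1] + c[1]*x_true[2],
     a[2]*x_true[1] + b[2]*x_true[2] + c[2]*x_true[3],
     a[3]*x_true[2] + b[3]*x_true[3]]
print(thomas(a, b, c, r))   # ~ [1.0, 2.0, 3.0, 4.0]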
Intercooler.js: AJAX with Attributes - mrswag
http://intercoolerjs.org/
======
carsongross
A couple of philosophical essays around intercooler:

"Rescuing REST From the API Winter" - http://intercoolerjs.org/2016/01/18/rescuing-rest.html

"HATEOAS is for Humans" - http://intercoolerjs.org/2016/05/08/hatoeas-is-for-humans.html
Company Gala Films

A group of oddball high school students find themselves trapped in detention during a zombie apocalypse. Can they put their differences aside and work together to survive the night? Fat chance! After all, this is high school.
Processes underlying young children's spatial orientation during movement. Children between 1.5 and 4 years old were tested for their ability to relocate a hidden object after a 180-degree self-produced movement around an array of four locations. In one task the object's location relative to the other locations could be uniquely defined within one dimension, while in another, two dimensions were needed to do this. No differences emerged between conditions, and by 3 years few errors occurred, despite the fact that children were unable to view the array during movement. This indicates either that young children encounter no specific difficulty in coordinating dimensions or that they solved the task without recourse to such a system. An error analysis supports the second possibility. Children apparently tackled the task using a system directly related to body movement, since errors were frequently the result of incomplete compensation for movement around the array. In a second study, in which the four containers were placed in contact, children's performance declined, and the relation between direction of movement and error was replaced by some evidence for updating on the near-far dimension accompanied by failure to update the left-right dimension. Thus children appear to change strategy when the problem requires more precise specification of target location.
ASMR: The sound that massages your brain

In some, it causes a tingling sensation that starts in the scalp and travels down the body. Others claim it lowers blood pressure, eases anxiety and can even induce sleep.

The interface is YouTube, where a growing number of "whisperers" softly talk the viewer through a role-play situation featuring "triggers". The videos can show hands organizing a jewelry box, a virtual doctor visit or spa treatment. The voices are soft and whispery. The triggers can be the sound of the voices or other sounds like tapping, scratching, paper rustling or brush strokes on canvas. The users of whisper videos even created a pseudo-scientific name for the phenomenon: Autonomous Sensory Meridian Response.

"I had the personal experience of getting this sensation," said Drew, an ASMRtist, or whisperer. "It's kind of like a tingling sensation that starts in your scalp. It might travel down your spine, sometimes it might travel out through your limbs. It's similar to the sensation of chills."

Drew, an undergraduate in psychology with experience in a clinical setting, says he makes the videos because he's interested in all kinds of therapy. "A lot of people will write me personal messages and say that I've helped them get off their medication for anxiety or insomnia," he said.

While there is no scientific proof of ASMR, it is starting to get some attention in the world of academia. Yale professor of neurology Steven Novella recently blogged about ASMR and didn't dismiss the idea of its existence.

"Is it real? In this case, I don't think there is a definitive answer, but I am inclined to believe that it is," wrote Novella. "There are a number of people who seem to have independently (that is always the key, but it is a recent-enough phenomenon that this appears to be true) experienced and described the same syndrome with some fairly specific details. In this way it's similar to migraine headaches - we know they exist as a syndrome primarily because many different people report the same constellation of symptoms and natural history."

Ilse is an ASMRtist from the Netherlands who boasts 43,000 subscribers to her WaterWhispers channel. She now whispers full time and is able to make a living through her three YouTube channels and a website dedicated to ASMR.

"It changed my life completely," said Ilse. "A lot of people use it for relaxation, just to wind down from a stressful day at work. You have different people from different countries around the globe with different professions that listen to my videos and can actually induce relaxation."

Sleep experts, though, say whisper videos may not be the best way for you to get quality sleep. Dr. Amer Khan, a sleep specialist with the Sutter Neuroscience Institute, says using ASMR videos is a lot like using a white noise machine or a softly tuned television. It can become a crutch.

"There is a problem. It's called a sleep association," said Khan. "Like a baby who has to have a pacifier in the mouth to fall asleep."

Khan prefers meditation and says for the best quality sleep, people need to learn to self-soothe and let go of the day. "Sleep requires letting go of things," he said. "You can't sleep if you can't let go of what happened today."

The growth of the whisper video subculture on the Internet can be tracked quite easily through Google search analytics, which show a slow increase in searches for whisper videos in 2011 and a spike in 2012. While there is no scientific research currently on ASMR, there is plenty of curiosity.
A Canadian filmmaker is working on a documentary about ASMR called "Braingasms", and the phenomenon has been profiled on NPR. While neurologists discuss the best ways to measure the phenomenon (MRI is the most popular suggestion), those who make the videos say they will continue helping those who are relaxed by the soft whispers and sounds they upload each week.

ASMRtists like Ally say the proof comes in the comments they receive from viewers. "One of the most moving comments I had was from a man who had just come home from a tour in Afghanistan," said Ally. "He was struggling with PTSD and told me he couldn't get to sleep without watching one of my videos. They were getting him into a relaxed state where he could quiet his mind enough to find peace and rest."
WCW! The mouthwatering Betty Namagembe from MUBS

A renowned scholar in the early 90’s postulated that ‘people will always stare, just make it worth it’, and this could be a line our crush this week has mastered by heart. Betty Namagembe Mwanaomu, a fresh sweetheart at Makerere University Business School, where she pursues her Bachelor of International Business, is perfectly aligned with beauty that will leave you with nothing to do but savour her rare awesomeness. She is the kind that walks with the whole universe on her shoulders and an infectious smile. Her soft, tender skin will leave you wanting.

Betty carries her beauty lightly, as though she neither knows nor cares about its effect. You’ll stare at her face and see a touch of poetry in her smile, and eyes as bright as the sun that look directly at your soul. Her soft, plump lips are enough to cause a stampede. She is outgoing, a lover of life and not your normal slay baby. What is a crush if she has no valour and self-respect? It is no contradiction, and I am not even surprised, that her birthday fell on a Wednesday.

Today, Betty doesn’t only prove that she is God’s only sample in creating humanity but also makes her surroundings better with another year of cuteness. She only gets better with age, like fine old wine. Take the chance to join her long list of stalkers. Take the chance to wish her well for the future. Search for ‘mwana.omu’ via Instagram. Her mouthwatering pictures and strategically taken selfies will bless your month for the better! Happy Earth Strong, Betty.
LAWRENCE, KS (KCTV) - A Kansas man has begun an unusual search for his family. He's already found one half-brother, but doesn't know how many others might be out there. David Brown has two dads and a mom, and that unusual family structure is something he's struggled with wrapping his head around his entire life. "I didn't know where I came from, so there's like half of me missing," he said. That's because, while one man raised him, his biological father was a sperm donor. "My dad has always been my dad and he always will be," Brown said. "There are things that I wonder about, like where did that come from." Brown was conceived from a sperm donor at the University of Kansas Medical Center, but it was his wife Lauren who got the ball rolling on Brown's journey to find his biological father. "I was like, ‘oh my gosh, we have to find out who this person is,'" she said. That became difficult because the fertility clinic Brown's parents used no longer exists. Instead the couple turned to the web for help where they tracked down Brown's half-brother who is also looking for his dad. "I didn't know if I could have other siblings out there. It made me really curious," Brown said. He said the search is not just for him, but also for his daughters. "For my kids I feel like it's a hole because I'd like to know medical history, for me and my kids," Brown said. Brown says he knows exactly what he would say to his biological father if he ever met the man. "'Thank you for doing what you did for whatever reason you did' and just that ‘I appreciate that you did that,'" he said. Brown acknowledges that he may never make that happen and says that if he never meets the man who helped give him life, his life is still complete. "I don't feel like I'm missing the father figure or anything like that," he said.
//
//  CreditCardCondition.swift
//  FormValidatorSwift
//
//  Created by Onur Ersel on 02/11/2016.
//  Copyright © 2016 ustwo Fampany Ltd. All rights reserved.
//

import Foundation

/**
 *  The `CreditCardCondition` checks a string for a credit card number.
 */
public struct CreditCardCondition: ConfigurableCondition {

    // MARK: - Properties

    public var localizedViolationString = StringLocalization.sharedInstance.localizedString("US2KeyConditionViolationCreditCard", comment: "")

    public var regex: String {
        return configuration.cardType.regex
    }

    public var shouldAllowViolation = true

    public var configuration: CreditCardConfiguration

    // MARK: - Initializers

    public init(configuration: CreditCardConfiguration) {
        self.configuration = configuration
    }

    // MARK: - Check

    /**
     Checks if the string is a valid credit card number, after removing all non-digit characters.
     */
    public func check(_ text: String?) -> Bool {
        guard let sourceText = text,
            let regExp = try? NSRegularExpression(pattern: self.regex, options: .caseInsensitive) else {
                return false
        }

        // Strip everything that is not a digit before matching the card pattern.
        let sourceTextNs = sourceText as NSString
        let trimmedText = sourceTextNs.replacingOccurrences(of: "\\D",
                                                            with: "",
                                                            options: .regularExpression,
                                                            range: NSRange(location: 0, length: sourceTextNs.length)) as String

        return check(trimmedText, withRegex: regExp)
    }

    public func check(_ trimmedText: String, withRegex regExp: NSRegularExpression) -> Bool {
        return regExp.firstMatch(in: trimmedText, options: [], range: NSRange(location: 0, length: trimmedText.count)) != nil
    }

}
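The check above boils down to two steps: strip every non-digit from the input, then match the card type's regular expression against what is left. The Python sketch below mirrors that logic for illustration only; the Visa-style pattern and the function name check_credit_card are assumptions made for the example and are not taken from the FormValidatorSwift library, whose actual pattern comes from configuration.cardType.regex.

import re

# Illustrative pattern only: matches 16-digit numbers starting with 4
# (a commonly quoted Visa shape), standing in for cardType.regex.
VISA_REGEX = re.compile(r"^4[0-9]{15}$")

def check_credit_card(text, pattern=VISA_REGEX):
    """Mirror of CreditCardCondition.check: drop every non-digit, then match."""
    if text is None:
        return False
    trimmed = re.sub(r"\D", "", text)
    return pattern.match(trimmed) is not None

print(check_credit_card("4111 1111 1111 1111"))   # True
print(check_credit_card("not a card number"))     # False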
Regulation of putative fatty acid transporters and Acyl-CoA synthetase in liver and adipose tissue in ob/ob mice. The hyperlipidemia associated with obesity and type 2 diabetes is caused by an increase in hepatic triglyceride synthesis and secretion that is secondary to an increase in de novo lipogenesis, a decrease in fatty acid (FA) oxidation, and an increase in the flux of peripherally derived FA to the liver. The uptake of FA across the plasma membrane may be mediated by three distinct proteins--FA translocase (FAT), plasma membrane FA binding protein (FABP-pm), and FA transport protein (FATP)--that have recently been characterized. Acyl-CoA synthetase (ACS) enhances the uptake of FAs by catalyzing their activation to acyl-CoA esters for subsequent use in anabolic or catabolic pathways. In this study, we examine the mRNA levels of FAT, FABP-pm, FATP, and ACS in the liver and adipose tissue of genetically obese (ob/ob) mice and their control littermates. FAT mRNA levels were 15-fold higher in liver and 60-80% higher in adipose tissue of ob/ob mice. FABP-pm mRNA levels were twofold higher in liver and 50% higher in adipose tissue of ob/ob mice. FATP mRNA levels were not increased in liver or adipose tissue. ACS mRNA levels were higher in adipose tissue but remained unchanged in liver. However, the distribution of ACS activity associated with mitochondria and microsomes in liver was altered in ob/ob mice. In control littermates, 61% of ACS activity was associated with mitochondria and 39% with microsomes, whereas in ob/ob mice 34% of ACS activity was associated with mitochondria and 66% with microsomes; this distribution would make more FA available for esterification, rather than oxidation, in ob/ob mouse liver. Taken together, our results suggest that the upregulation of FAT and FABP-pm mRNAs may increase the uptake of FA in adipose tissue and liver in ob/ob mice, which, coupled with an increase in microsomal ACS activity in liver, will enhance the esterification of FA and support the increased triglyceride synthesis and VLDL production that characterizes obesity and type 2 diabetes.