",options:{disabled:!1,create:null},_createWidget:function(t,s){s=e(s||this.defaultElement||this)[0],this.element=e(s),this.uuid=i++,this.eventNamespace="."+this.widgetName+this.uuid,this.options=e.widget.extend({},this.options,this._getCreateOptions(),t),this.bindings=e(),this.hoverable=e(),this.focusable=e(),s!==this&&(e.data(s,this.widgetFullName,this),this._on(!0,this.element,{remove:function(e){e.target===s&&this.destroy()}}),this.document=e(s.style?s.ownerDocument:s.document||s),this.window=e(this.document[0].defaultView||this.document[0].parentWindow)),this._create(),this._trigger("create",null,this._getCreateEventData()),this._init()},_getCreateOptions:e.noop,_getCreateEventData:e.noop,_create:e.noop,_init:e.noop,destroy:function(){this._destroy(),this.element.unbind(this.eventNamespace).removeData(this.widgetName).removeData(this.widgetFullName).removeData(e.camelCase(this.widgetFullName)),this.widget().unbind(this.eventNamespace).removeAttr("aria-disabled").removeClass(this.widgetFullName+"-disabled "+"ui-state-disabled"),this.bindings.unbind(this.eventNamespace),this.hoverable.removeClass("ui-state-hover"),this.focusable.removeClass("ui-state-focus")},_destroy:e.noop,widget:function(){return this.element},option:function(i,s){var n,a,r,o=i;if(0===arguments.length)return e.widget.extend({},this.options);if("string"==typeof i)if(o={},n=i.split("."),i=n.shift(),n.length){for(a=o[i]=e.widget.extend({},this.options[i]),r=0;n.length-1>r;r++)a[n[r]]=a[n[r]]||{},a=a[n[r]];if(i=n.pop(),s===t)return a[i]===t?null:a[i];a[i]=s}else{if(s===t)return this.options[i]===t?null:this.options[i];o[i]=s}return this._setOptions(o),this},_setOptions:function(e){var t;for(t in e)this._setOption(t,e[t]);return this},_setOption:function(e,t){return this.options[e]=t,"disabled"===e&&(this.widget().toggleClass(this.widgetFullName+"-disabled ui-state-disabled",!!t).attr("aria-disabled",t),this.hoverable.removeClass("ui-state-hover"),this.focusable.removeClass("ui-state-focus")),this},enable:function(){return this._setOption("disabled",!1)},disable:function(){return this._setOption("disabled",!0)},_on:function(i,s,n){var a,r=this;"boolean"!=typeof i&&(n=s,s=i,i=!1),n?(s=a=e(s),this.bindings=this.bindings.add(s)):(n=s,s=this.element,a=this.widget()),e.each(n,function(n,o){function h(){return i||r.options.disabled!==!0&&!e(this).hasClass("ui-state-disabled")?("string"==typeof o?r[o]:o).apply(r,arguments):t}"string"!=typeof o&&(h.guid=o.guid=o.guid||h.guid||e.guid++);var l=n.match(/^(\w+)\s*(.*)$/),u=l[1]+r.eventNamespace,c=l[2];c?a.delegate(c,u,h):s.bind(u,h)})},_off:function(e,t){t=(t||"").split(" ").join(this.eventNamespace+" ")+this.eventNamespace,e.unbind(t).undelegate(t)},_delay:function(e,t){function i(){return("string"==typeof e?s[e]:e).apply(s,arguments)}var s=this;return setTimeout(i,t||0)},_hoverable:function(t){this.hoverable=this.hoverable.add(t),this._on(t,{mouseenter:function(t){e(t.currentTarget).addClass("ui-state-hover")},mouseleave:function(t){e(t.currentTarget).removeClass("ui-state-hover")}})},_focusable:function(t){this.focusable=this.focusable.add(t),this._on(t,{focusin:function(t){e(t.currentTarget).addClass("ui-state-focus")},focusout:function(t){e(t.currentTarget).removeClass("ui-state-focus")}})},_trigger:function(t,i,s){var n,a,r=this.options[t];if(s=s||{},i=e.Event(i),i.type=(t===this.widgetEventPrefix?t:this.widgetEventPrefix+t).toLowerCase(),i.target=this.element[0],a=i.originalEvent)for(n in a)n in i||(i[n]=a[n]);return 
this.element.trigger(i,s),!(e.isFunction(r)&&r.apply(this.element[0],[i].concat(s))===!1||i.isDefaultPrevented())}},e.each({show:"fadeIn",hide:"fadeOut"},function(t,i){e.Widget.prototype["_"+t]=function(s,n,a){"string"==typeof n&&(n={effect:n});var r,o=n?n===!0||"number"==typeof n?i:n.effect||i:t;n=n||{},"number"==typeof n&&(n={duration:n}),r=!e.isEmptyObject(n),n.complete=a,n.delay&&s.delay(n.delay),r&&e.effects&&e.effects.effect[o]?s[t](n):o!==t&&s[o]?s[o](n.duration,n.easing,a):s.queue(function(i){e(this)[t](),a&&a.call(s[0]),i()})}})})(jQuery);(function(e){var t=!1;e(document).mouseup(function(){t=!1}),e.widget("ui.mouse",{version:"1.10.2",options:{cancel:"input,textarea,button,select,option",distance:1,delay:0},_mouseInit:function(){var t=this;this.element.bind("mousedown."+this.widgetName,function(e){return t._mouseDown(e)}).bind("click."+this.widgetName,function(i){return!0===e.data(i.target,t.widgetName+".preventClickEvent")?(e.removeData(i.target,t.widgetName+".preventClickEvent"),i.stopImmediatePropagation(),!1):undefined}),this.started=!1},_mouseDestroy:function(){this.element.unbind("."+this.widgetName),this._mouseMoveDelegate&&e(document).unbind("mousemove."+this.widgetName,this._mouseMoveDelegate).unbind("mouseup."+this.widgetName,this._mouseUpDelegate)},_mouseDown:function(i){if(!t){this._mouseStarted&&this._mouseUp(i),this._mouseDownEvent=i;var s=this,n=1===i.which,a="string"==typeof this.options.cancel&&i.target.nodeName?e(i.target).closest(this.options.cancel).length:!1;return n&&!a&&this._mouseCapture(i)?(this.mouseDelayMet=!this.options.delay,this.mouseDelayMet||(this._mouseDelayTimer=setTimeout(function(){s.mouseDelayMet=!0},this.options.delay)),this._mouseDistanceMet(i)&&this._mouseDelayMet(i)&&(this._mouseStarted=this._mouseStart(i)!==!1,!this._mouseStarted)?(i.preventDefault(),!0):(!0===e.data(i.target,this.widgetName+".preventClickEvent")&&e.removeData(i.target,this.widgetName+".preventClickEvent"),this._mouseMoveDelegate=function(e){return s._mouseMove(e)},this._mouseUpDelegate=function(e){return s._mouseUp(e)},e(document).bind("mousemove."+this.widgetName,this._mouseMoveDelegate).bind("mouseup."+this.widgetName,this._mouseUpDelegate),i.preventDefault(),t=!0,!0)):!0}},_mouseMove:function(t){return e.ui.ie&&(!document.documentMode||9>document.documentMode)&&!t.button?this._mouseUp(t):this._mouseStarted?(this._mouseDrag(t),t.preventDefault()):(this._mouseDistanceMet(t)&&this._mouseDelayMet(t)&&(this._mouseStarted=this._mouseStart(this._mouseDownEvent,t)!==!1,this._mouseStarted?this._mouseDrag(t):this._mouseUp(t)),!this._mouseStarted)},_mouseUp:function(t){return e(document).unbind("mousemove."+this.widgetName,this._mouseMoveDelegate).unbind("mouseup."+this.widgetName,this._mouseUpDelegate),this._mouseStarted&&(this._mouseStarted=!1,t.target===this._mouseDownEvent.target&&e.data(t.target,this.widgetName+".preventClickEvent",!0),this._mouseStop(t)),!1},_mouseDistanceMet:function(e){return Math.max(Math.abs(this._mouseDownEvent.pageX-e.pageX),Math.abs(this._mouseDownEvent.pageY-e.pageY))>=this.options.distance},_mouseDelayMet:function(){return this.mouseDelayMet},_mouseStart:function(){},_mouseDrag:function(){},_mouseStop:function(){},_mouseCapture:function(){return!0}})})(jQuery);(function(e){function t(e){return parseInt(e,10)||0}function 
i(e){return!isNaN(parseInt(e,10))}e.widget("ui.resizable",e.ui.mouse,{version:"1.10.2",widgetEventPrefix:"resize",options:{alsoResize:!1,animate:!1,animateDuration:"slow",animateEasing:"swing",aspectRatio:!1,autoHide:!1,containment:!1,ghost:!1,grid:!1,handles:"e,s,se",helper:!1,maxHeight:null,maxWidth:null,minHeight:10,minWidth:10,zIndex:90,resize:null,start:null,stop:null},_create:function(){var t,i,s,n,a,o=this,r=this.options;if(this.element.addClass("ui-resizable"),e.extend(this,{_aspectRatio:!!r.aspectRatio,aspectRatio:r.aspectRatio,originalElement:this.element,_proportionallyResizeElements:[],_helper:r.helper||r.ghost||r.animate?r.helper||"ui-resizable-helper":null}),this.element[0].nodeName.match(/canvas|textarea|input|select|button|img/i)&&(this.element.wrap(e("
").css({position:this.element.css("position"),width:this.element.outerWidth(),height:this.element.outerHeight(),top:this.element.css("top"),left:this.element.css("left")})),this.element=this.element.parent().data("ui-resizable",this.element.data("ui-resizable")),this.elementIsWrapper=!0,this.element.css({marginLeft:this.originalElement.css("marginLeft"),marginTop:this.originalElement.css("marginTop"),marginRight:this.originalElement.css("marginRight"),marginBottom:this.originalElement.css("marginBottom")}),this.originalElement.css({marginLeft:0,marginTop:0,marginRight:0,marginBottom:0}),this.originalResizeStyle=this.originalElement.css("resize"),this.originalElement.css("resize","none"),this._proportionallyResizeElements.push(this.originalElement.css({position:"static",zoom:1,display:"block"})),this.originalElement.css({margin:this.originalElement.css("margin")}),this._proportionallyResize()),this.handles=r.handles||(e(".ui-resizable-handle",this.element).length?{n:".ui-resizable-n",e:".ui-resizable-e",s:".ui-resizable-s",w:".ui-resizable-w",se:".ui-resizable-se",sw:".ui-resizable-sw",ne:".ui-resizable-ne",nw:".ui-resizable-nw"}:"e,s,se"),this.handles.constructor===String)for("all"===this.handles&&(this.handles="n,e,s,w,se,sw,ne,nw"),t=this.handles.split(","),this.handles={},i=0;t.length>i;i++)s=e.trim(t[i]),a="ui-resizable-"+s,n=e("
"),n.css({zIndex:r.zIndex}),"se"===s&&n.addClass("ui-icon ui-icon-gripsmall-diagonal-se"),this.handles[s]=".ui-resizable-"+s,this.element.append(n);this._renderAxis=function(t){var i,s,n,a;t=t||this.element;for(i in this.handles)this.handles[i].constructor===String&&(this.handles[i]=e(this.handles[i],this.element).show()),this.elementIsWrapper&&this.originalElement[0].nodeName.match(/textarea|input|select|button/i)&&(s=e(this.handles[i],this.element),a=/sw|ne|nw|se|n|s/.test(i)?s.outerHeight():s.outerWidth(),n=["padding",/ne|nw|n/.test(i)?"Top":/se|sw|s/.test(i)?"Bottom":/^e$/.test(i)?"Right":"Left"].join(""),t.css(n,a),this._proportionallyResize()),e(this.handles[i]).length},this._renderAxis(this.element),this._handles=e(".ui-resizable-handle",this.element).disableSelection(),this._handles.mouseover(function(){o.resizing||(this.className&&(n=this.className.match(/ui-resizable-(se|sw|ne|nw|n|e|s|w)/i)),o.axis=n&&n[1]?n[1]:"se")}),r.autoHide&&(this._handles.hide(),e(this.element).addClass("ui-resizable-autohide").mouseenter(function(){r.disabled||(e(this).removeClass("ui-resizable-autohide"),o._handles.show())}).mouseleave(function(){r.disabled||o.resizing||(e(this).addClass("ui-resizable-autohide"),o._handles.hide())})),this._mouseInit()},_destroy:function(){this._mouseDestroy();var t,i=function(t){e(t).removeClass("ui-resizable ui-resizable-disabled ui-resizable-resizing").removeData("resizable").removeData("ui-resizable").unbind(".resizable").find(".ui-resizable-handle").remove()};return this.elementIsWrapper&&(i(this.element),t=this.element,this.originalElement.css({position:t.css("position"),width:t.outerWidth(),height:t.outerHeight(),top:t.css("top"),left:t.css("left")}).insertAfter(t),t.remove()),this.originalElement.css("resize",this.originalResizeStyle),i(this.originalElement),this},_mouseCapture:function(t){var i,s,n=!1;for(i in this.handles)s=e(this.handles[i])[0],(s===t.target||e.contains(s,t.target))&&(n=!0);return!this.options.disabled&&n},_mouseStart:function(i){var s,n,a,o=this.options,r=this.element.position(),h=this.element;return this.resizing=!0,/absolute/.test(h.css("position"))?h.css({position:"absolute",top:h.css("top"),left:h.css("left")}):h.is(".ui-draggable")&&h.css({position:"absolute",top:r.top,left:r.left}),this._renderProxy(),s=t(this.helper.css("left")),n=t(this.helper.css("top")),o.containment&&(s+=e(o.containment).scrollLeft()||0,n+=e(o.containment).scrollTop()||0),this.offset=this.helper.offset(),this.position={left:s,top:n},this.size=this._helper?{width:h.outerWidth(),height:h.outerHeight()}:{width:h.width(),height:h.height()},this.originalSize=this._helper?{width:h.outerWidth(),height:h.outerHeight()}:{width:h.width(),height:h.height()},this.originalPosition={left:s,top:n},this.sizeDiff={width:h.outerWidth()-h.width(),height:h.outerHeight()-h.height()},this.originalMousePosition={left:i.pageX,top:i.pageY},this.aspectRatio="number"==typeof o.aspectRatio?o.aspectRatio:this.originalSize.width/this.originalSize.height||1,a=e(".ui-resizable-"+this.axis).css("cursor"),e("body").css("cursor","auto"===a?this.axis+"-resize":a),h.addClass("ui-resizable-resizing"),this._propagate("start",i),!0},_mouseDrag:function(t){var i,s=this.helper,n={},a=this.originalMousePosition,o=this.axis,r=this.position.top,h=this.position.left,l=this.size.width,u=this.size.height,c=t.pageX-a.left||0,d=t.pageY-a.top||0,p=this._change[o];return 
p?(i=p.apply(this,[t,c,d]),this._updateVirtualBoundaries(t.shiftKey),(this._aspectRatio||t.shiftKey)&&(i=this._updateRatio(i,t)),i=this._respectSize(i,t),this._updateCache(i),this._propagate("resize",t),this.position.top!==r&&(n.top=this.position.top+"px"),this.position.left!==h&&(n.left=this.position.left+"px"),this.size.width!==l&&(n.width=this.size.width+"px"),this.size.height!==u&&(n.height=this.size.height+"px"),s.css(n),!this._helper&&this._proportionallyResizeElements.length&&this._proportionallyResize(),e.isEmptyObject(n)||this._trigger("resize",t,this.ui()),!1):!1},_mouseStop:function(t){this.resizing=!1;var i,s,n,a,o,r,h,l=this.options,u=this;return this._helper&&(i=this._proportionallyResizeElements,s=i.length&&/textarea/i.test(i[0].nodeName),n=s&&e.ui.hasScroll(i[0],"left")?0:u.sizeDiff.height,a=s?0:u.sizeDiff.width,o={width:u.helper.width()-a,height:u.helper.height()-n},r=parseInt(u.element.css("left"),10)+(u.position.left-u.originalPosition.left)||null,h=parseInt(u.element.css("top"),10)+(u.position.top-u.originalPosition.top)||null,l.animate||this.element.css(e.extend(o,{top:h,left:r})),u.helper.height(u.size.height),u.helper.width(u.size.width),this._helper&&!l.animate&&this._proportionallyResize()),e("body").css("cursor","auto"),this.element.removeClass("ui-resizable-resizing"),this._propagate("stop",t),this._helper&&this.helper.remove(),!1},_updateVirtualBoundaries:function(e){var t,s,n,a,o,r=this.options;o={minWidth:i(r.minWidth)?r.minWidth:0,maxWidth:i(r.maxWidth)?r.maxWidth:1/0,minHeight:i(r.minHeight)?r.minHeight:0,maxHeight:i(r.maxHeight)?r.maxHeight:1/0},(this._aspectRatio||e)&&(t=o.minHeight*this.aspectRatio,n=o.minWidth/this.aspectRatio,s=o.maxHeight*this.aspectRatio,a=o.maxWidth/this.aspectRatio,t>o.minWidth&&(o.minWidth=t),n>o.minHeight&&(o.minHeight=n),o.maxWidth>s&&(o.maxWidth=s),o.maxHeight>a&&(o.maxHeight=a)),this._vBoundaries=o},_updateCache:function(e){this.offset=this.helper.offset(),i(e.left)&&(this.position.left=e.left),i(e.top)&&(this.position.top=e.top),i(e.height)&&(this.size.height=e.height),i(e.width)&&(this.size.width=e.width)},_updateRatio:function(e){var t=this.position,s=this.size,n=this.axis;return i(e.height)?e.width=e.height*this.aspectRatio:i(e.width)&&(e.height=e.width/this.aspectRatio),"sw"===n&&(e.left=t.left+(s.width-e.width),e.top=null),"nw"===n&&(e.top=t.top+(s.height-e.height),e.left=t.left+(s.width-e.width)),e},_respectSize:function(e){var t=this._vBoundaries,s=this.axis,n=i(e.width)&&t.maxWidth&&t.maxWidth
e.width,r=i(e.height)&&t.minHeight&&t.minHeight>e.height,h=this.originalPosition.left+this.originalSize.width,l=this.position.top+this.size.height,u=/sw|nw|w/.test(s),c=/nw|ne|n/.test(s);return o&&(e.width=t.minWidth),r&&(e.height=t.minHeight),n&&(e.width=t.maxWidth),a&&(e.height=t.maxHeight),o&&u&&(e.left=h-t.minWidth),n&&u&&(e.left=h-t.maxWidth),r&&c&&(e.top=l-t.minHeight),a&&c&&(e.top=l-t.maxHeight),e.width||e.height||e.left||!e.top?e.width||e.height||e.top||!e.left||(e.left=null):e.top=null,e},_proportionallyResize:function(){if(this._proportionallyResizeElements.length){var e,t,i,s,n,a=this.helper||this.element;for(e=0;this._proportionallyResizeElements.length>e;e++){if(n=this._proportionallyResizeElements[e],!this.borderDif)for(this.borderDif=[],i=[n.css("borderTopWidth"),n.css("borderRightWidth"),n.css("borderBottomWidth"),n.css("borderLeftWidth")],s=[n.css("paddingTop"),n.css("paddingRight"),n.css("paddingBottom"),n.css("paddingLeft")],t=0;i.length>t;t++)this.borderDif[t]=(parseInt(i[t],10)||0)+(parseInt(s[t],10)||0);n.css({height:a.height()-this.borderDif[0]-this.borderDif[2]||0,width:a.width()-this.borderDif[1]-this.borderDif[3]||0})}}},_renderProxy:function(){var t=this.element,i=this.options;this.elementOffset=t.offset(),this._helper?(this.helper=this.helper||e("
"),this.helper.addClass(this._helper).css({width:this.element.outerWidth()-1,height:this.element.outerHeight()-1,position:"absolute",left:this.elementOffset.left+"px",top:this.elementOffset.top+"px",zIndex:++i.zIndex}),this.helper.appendTo("body").disableSelection()):this.helper=this.element},_change:{e:function(e,t){return{width:this.originalSize.width+t}},w:function(e,t){var i=this.originalSize,s=this.originalPosition;return{left:s.left+t,width:i.width-t}},n:function(e,t,i){var s=this.originalSize,n=this.originalPosition;return{top:n.top+i,height:s.height-i}},s:function(e,t,i){return{height:this.originalSize.height+i}},se:function(t,i,s){return e.extend(this._change.s.apply(this,arguments),this._change.e.apply(this,[t,i,s]))},sw:function(t,i,s){return e.extend(this._change.s.apply(this,arguments),this._change.w.apply(this,[t,i,s]))},ne:function(t,i,s){return e.extend(this._change.n.apply(this,arguments),this._change.e.apply(this,[t,i,s]))},nw:function(t,i,s){return e.extend(this._change.n.apply(this,arguments),this._change.w.apply(this,[t,i,s]))}},_propagate:function(t,i){e.ui.plugin.call(this,t,[i,this.ui()]),"resize"!==t&&this._trigger(t,i,this.ui())},plugins:{},ui:function(){return{originalElement:this.originalElement,element:this.element,helper:this.helper,position:this.position,size:this.size,originalSize:this.originalSize,originalPosition:this.originalPosition}}}),e.ui.plugin.add("resizable","animate",{stop:function(t){var i=e(this).data("ui-resizable"),s=i.options,n=i._proportionallyResizeElements,a=n.length&&/textarea/i.test(n[0].nodeName),o=a&&e.ui.hasScroll(n[0],"left")?0:i.sizeDiff.height,r=a?0:i.sizeDiff.width,h={width:i.size.width-r,height:i.size.height-o},l=parseInt(i.element.css("left"),10)+(i.position.left-i.originalPosition.left)||null,u=parseInt(i.element.css("top"),10)+(i.position.top-i.originalPosition.top)||null;i.element.animate(e.extend(h,u&&l?{top:u,left:l}:{}),{duration:s.animateDuration,easing:s.animateEasing,step:function(){var s={width:parseInt(i.element.css("width"),10),height:parseInt(i.element.css("height"),10),top:parseInt(i.element.css("top"),10),left:parseInt(i.element.css("left"),10)};n&&n.length&&e(n[0]).css({width:s.width,height:s.height}),i._updateCache(s),i._propagate("resize",t)}})}}),e.ui.plugin.add("resizable","containment",{start:function(){var i,s,n,a,o,r,h,l=e(this).data("ui-resizable"),u=l.options,c=l.element,d=u.containment,p=d instanceof e?d.get(0):/parent/.test(d)?c.parent().get(0):d;p&&(l.containerElement=e(p),/document/.test(d)||d===document?(l.containerOffset={left:0,top:0},l.containerPosition={left:0,top:0},l.parentData={element:e(document),left:0,top:0,width:e(document).width(),height:e(document).height()||document.body.parentNode.scrollHeight}):(i=e(p),s=[],e(["Top","Right","Left","Bottom"]).each(function(e,n){s[e]=t(i.css("padding"+n))}),l.containerOffset=i.offset(),l.containerPosition=i.position(),l.containerSize={height:i.innerHeight()-s[3],width:i.innerWidth()-s[1]},n=l.containerOffset,a=l.containerSize.height,o=l.containerSize.width,r=e.ui.hasScroll(p,"left")?p.scrollWidth:o,h=e.ui.hasScroll(p)?p.scrollHeight:a,l.parentData={element:p,left:n.left,top:n.top,width:r,height:h}))},resize:function(t){var 
i,s,n,a,o=e(this).data("ui-resizable"),r=o.options,h=o.containerOffset,l=o.position,u=o._aspectRatio||t.shiftKey,c={top:0,left:0},d=o.containerElement;d[0]!==document&&/static/.test(d.css("position"))&&(c=h),l.left<(o._helper?h.left:0)&&(o.size.width=o.size.width+(o._helper?o.position.left-h.left:o.position.left-c.left),u&&(o.size.height=o.size.width/o.aspectRatio),o.position.left=r.helper?h.left:0),l.top<(o._helper?h.top:0)&&(o.size.height=o.size.height+(o._helper?o.position.top-h.top:o.position.top),u&&(o.size.width=o.size.height*o.aspectRatio),o.position.top=o._helper?h.top:0),o.offset.left=o.parentData.left+o.position.left,o.offset.top=o.parentData.top+o.position.top,i=Math.abs((o._helper?o.offset.left-c.left:o.offset.left-c.left)+o.sizeDiff.width),s=Math.abs((o._helper?o.offset.top-c.top:o.offset.top-h.top)+o.sizeDiff.height),n=o.containerElement.get(0)===o.element.parent().get(0),a=/relative|absolute/.test(o.containerElement.css("position")),n&&a&&(i-=o.parentData.left),i+o.size.width>=o.parentData.width&&(o.size.width=o.parentData.width-i,u&&(o.size.height=o.size.width/o.aspectRatio)),s+o.size.height>=o.parentData.height&&(o.size.height=o.parentData.height-s,u&&(o.size.width=o.size.height*o.aspectRatio))},stop:function(){var t=e(this).data("ui-resizable"),i=t.options,s=t.containerOffset,n=t.containerPosition,a=t.containerElement,o=e(t.helper),r=o.offset(),h=o.outerWidth()-t.sizeDiff.width,l=o.outerHeight()-t.sizeDiff.height;t._helper&&!i.animate&&/relative/.test(a.css("position"))&&e(this).css({left:r.left-n.left-s.left,width:h,height:l}),t._helper&&!i.animate&&/static/.test(a.css("position"))&&e(this).css({left:r.left-n.left-s.left,width:h,height:l})}}),e.ui.plugin.add("resizable","alsoResize",{start:function(){var t=e(this).data("ui-resizable"),i=t.options,s=function(t){e(t).each(function(){var t=e(this);t.data("ui-resizable-alsoresize",{width:parseInt(t.width(),10),height:parseInt(t.height(),10),left:parseInt(t.css("left"),10),top:parseInt(t.css("top"),10)})})};"object"!=typeof i.alsoResize||i.alsoResize.parentNode?s(i.alsoResize):i.alsoResize.length?(i.alsoResize=i.alsoResize[0],s(i.alsoResize)):e.each(i.alsoResize,function(e){s(e)})},resize:function(t,i){var s=e(this).data("ui-resizable"),n=s.options,a=s.originalSize,o=s.originalPosition,r={height:s.size.height-a.height||0,width:s.size.width-a.width||0,top:s.position.top-o.top||0,left:s.position.left-o.left||0},h=function(t,s){e(t).each(function(){var t=e(this),n=e(this).data("ui-resizable-alsoresize"),a={},o=s&&s.length?s:t.parents(i.originalElement[0]).length?["width","height"]:["width","height","top","left"];e.each(o,function(e,t){var i=(n[t]||0)+(r[t]||0);i&&i>=0&&(a[t]=i||null)}),t.css(a)})};"object"!=typeof n.alsoResize||n.alsoResize.nodeType?h(n.alsoResize):e.each(n.alsoResize,function(e,t){h(e,t)})},stop:function(){e(this).removeData("resizable-alsoresize")}}),e.ui.plugin.add("resizable","ghost",{start:function(){var t=e(this).data("ui-resizable"),i=t.options,s=t.size;t.ghost=t.originalElement.clone(),t.ghost.css({opacity:.25,display:"block",position:"relative",height:s.height,width:s.width,margin:0,left:0,top:0}).addClass("ui-resizable-ghost").addClass("string"==typeof i.ghost?i.ghost:""),t.ghost.appendTo(t.helper)},resize:function(){var t=e(this).data("ui-resizable");t.ghost&&t.ghost.css({position:"relative",height:t.size.height,width:t.size.width})},stop:function(){var 
t=e(this).data("ui-resizable");t.ghost&&t.helper&&t.helper.get(0).removeChild(t.ghost.get(0))}}),e.ui.plugin.add("resizable","grid",{resize:function(){var t=e(this).data("ui-resizable"),i=t.options,s=t.size,n=t.originalSize,a=t.originalPosition,o=t.axis,r="number"==typeof i.grid?[i.grid,i.grid]:i.grid,h=r[0]||1,l=r[1]||1,u=Math.round((s.width-n.width)/h)*h,c=Math.round((s.height-n.height)/l)*l,d=n.width+u,p=n.height+c,f=i.maxWidth&&d>i.maxWidth,m=i.maxHeight&&p>i.maxHeight,g=i.minWidth&&i.minWidth>d,v=i.minHeight&&i.minHeight>p;i.grid=r,g&&(d+=h),v&&(p+=l),f&&(d-=h),m&&(p-=l),/^(se|s|e)$/.test(o)?(t.size.width=d,t.size.height=p):/^(ne)$/.test(o)?(t.size.width=d,t.size.height=p,t.position.top=a.top-c):/^(sw)$/.test(o)?(t.size.width=d,t.size.height=p,t.position.left=a.left-u):(t.size.width=d,t.size.height=p,t.position.top=a.top-c,t.position.left=a.left-u)}})})(jQuery);
\ No newline at end of file
diff --git a/llgo/third_party/go.tools/cmd/present/static/print.css b/llgo/third_party/go.tools/cmd/present/static/print.css
new file mode 100644
index 0000000000000000000000000000000000000000..6c58257213e5527287d19232f1414675b762eca0
--- /dev/null
+++ b/llgo/third_party/go.tools/cmd/present/static/print.css
@@ -0,0 +1,51 @@
+/* set page layout */
+@page {
+ size: A4 landscape;
+}
+
+body {
+ display: block !important;
+}
+
+.slides {
+ left: 0;
+ top: 0;
+}
+
+.slides > article {
+ position: relative;
+
+ left: 0;
+ top: 0;
+
+ margin: 0 !important;
+ page-break-inside: avoid;
+
+ text-shadow: none; /* disable shadow */
+
+ display: block !important;
+ transform: translate(0) !important;
+ -o-transform: translate(0) !important;
+ -moz-transform: translate(0) !important;
+ -webkit-transform: translate3d(0, 0, 0) !important;
+}
+
+div.code {
+ background: rgb(240, 240, 240);
+}
+
+/* hide click areas */
+.slide-area, #prev-slide-area, #next-slide-area {
+ display: none;
+}
+
+/* add explicit links */
+a:link:after, a:visited:after {
+ content: " (" attr(href) ") ";
+ font-size: 50%;
+}
+
+/* white background */
+body {
+ background: rgb(255,255,255) !important;
+}
diff --git a/llgo/third_party/go.tools/cmd/present/static/slides.js b/llgo/third_party/go.tools/cmd/present/static/slides.js
new file mode 100644
index 0000000000000000000000000000000000000000..ee54c94ec86d30f9ddef7431cedc440e14290857
--- /dev/null
+++ b/llgo/third_party/go.tools/cmd/present/static/slides.js
@@ -0,0 +1,518 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+var PERMANENT_URL_PREFIX = '/static/';
+
+var SLIDE_CLASSES = ['far-past', 'past', 'current', 'next', 'far-next'];
+
+var PM_TOUCH_SENSITIVITY = 15;
+
+var curSlide;
+
+/* ---------------------------------------------------------------------- */
+/* classList polyfill by Eli Grey
+ * (http://purl.eligrey.com/github/classList.js/blob/master/classList.js) */
+
+if (typeof document !== "undefined" && !("classList" in document.createElement("a"))) {
+
+(function (view) {
+
+var
+ classListProp = "classList"
+ , protoProp = "prototype"
+ , elemCtrProto = (view.HTMLElement || view.Element)[protoProp]
+ , objCtr = Object
+ , strTrim = String[protoProp].trim || function () {
+ return this.replace(/^\s+|\s+$/g, "");
+ }
+ , arrIndexOf = Array[protoProp].indexOf || function (item) {
+ for (var i = 0, len = this.length; i < len; i++) {
+ if (i in this && this[i] === item) {
+ return i;
+ }
+ }
+ return -1;
+ }
+ // Vendors: please allow content code to instantiate DOMExceptions
+ , DOMEx = function (type, message) {
+ this.name = type;
+ this.code = DOMException[type];
+ this.message = message;
+ }
+ , checkTokenAndGetIndex = function (classList, token) {
+ if (token === "") {
+ throw new DOMEx(
+ "SYNTAX_ERR"
+ , "An invalid or illegal string was specified"
+ );
+ }
+ if (/\s/.test(token)) {
+ throw new DOMEx(
+ "INVALID_CHARACTER_ERR"
+ , "String contains an invalid character"
+ );
+ }
+ return arrIndexOf.call(classList, token);
+ }
+ , ClassList = function (elem) {
+ var
+ trimmedClasses = strTrim.call(elem.className)
+ , classes = trimmedClasses ? trimmedClasses.split(/\s+/) : []
+ ;
+ for (var i = 0, len = classes.length; i < len; i++) {
+ this.push(classes[i]);
+ }
+ this._updateClassName = function () {
+ elem.className = this.toString();
+ };
+ }
+ , classListProto = ClassList[protoProp] = []
+ , classListGetter = function () {
+ return new ClassList(this);
+ }
+;
+// Most DOMException implementations don't allow calling DOMException's toString()
+// on non-DOMExceptions. Error's toString() is sufficient here.
+DOMEx[protoProp] = Error[protoProp];
+classListProto.item = function (i) {
+ return this[i] || null;
+};
+classListProto.contains = function (token) {
+ token += "";
+ return checkTokenAndGetIndex(this, token) !== -1;
+};
+classListProto.add = function (token) {
+ token += "";
+ if (checkTokenAndGetIndex(this, token) === -1) {
+ this.push(token);
+ this._updateClassName();
+ }
+};
+classListProto.remove = function (token) {
+ token += "";
+ var index = checkTokenAndGetIndex(this, token);
+ if (index !== -1) {
+ this.splice(index, 1);
+ this._updateClassName();
+ }
+};
+classListProto.toggle = function (token) {
+ token += "";
+ if (checkTokenAndGetIndex(this, token) === -1) {
+ this.add(token);
+ } else {
+ this.remove(token);
+ }
+};
+classListProto.toString = function () {
+ return this.join(" ");
+};
+
+if (objCtr.defineProperty) {
+ var classListPropDesc = {
+ get: classListGetter
+ , enumerable: true
+ , configurable: true
+ };
+ try {
+ objCtr.defineProperty(elemCtrProto, classListProp, classListPropDesc);
+ } catch (ex) { // IE 8 doesn't support enumerable:true
+ if (ex.number === -0x7FF5EC54) {
+ classListPropDesc.enumerable = false;
+ objCtr.defineProperty(elemCtrProto, classListProp, classListPropDesc);
+ }
+ }
+} else if (objCtr[protoProp].__defineGetter__) {
+ elemCtrProto.__defineGetter__(classListProp, classListGetter);
+}
+
+}(self));
+
+}
+/* ---------------------------------------------------------------------- */
+
+/* Slide movement */
+
+function getSlideEl(no) {
+ if ((no < 0) || (no >= slideEls.length)) {
+ return null;
+ } else {
+ return slideEls[no];
+ }
+};
+
+function updateSlideClass(slideNo, className) {
+ var el = getSlideEl(slideNo);
+
+ if (!el) {
+ return;
+ }
+
+ if (className) {
+ el.classList.add(className);
+ }
+
+ for (var i in SLIDE_CLASSES) {
+ if (className != SLIDE_CLASSES[i]) {
+ el.classList.remove(SLIDE_CLASSES[i]);
+ }
+ }
+};
+
+function updateSlides() {
+ if (window.trackPageview) window.trackPageview();
+
+ for (var i = 0; i < slideEls.length; i++) {
+ switch (i) {
+ case curSlide - 2:
+ updateSlideClass(i, 'far-past');
+ break;
+ case curSlide - 1:
+ updateSlideClass(i, 'past');
+ break;
+ case curSlide:
+ updateSlideClass(i, 'current');
+ break;
+ case curSlide + 1:
+ updateSlideClass(i, 'next');
+ break;
+ case curSlide + 2:
+ updateSlideClass(i, 'far-next');
+ break;
+ default:
+ updateSlideClass(i);
+ break;
+ }
+ }
+
+ triggerLeaveEvent(curSlide - 1);
+ triggerEnterEvent(curSlide);
+
+ window.setTimeout(function() {
+ // Hide after the slide
+ disableSlideFrames(curSlide - 2);
+ }, 301);
+
+ enableSlideFrames(curSlide - 1);
+ enableSlideFrames(curSlide + 2);
+
+ updateHash();
+};
+
+function prevSlide() {
+ if (curSlide > 0) {
+ curSlide--;
+
+ updateSlides();
+ }
+};
+
+function nextSlide() {
+ if (curSlide < slideEls.length - 1) {
+ curSlide++;
+
+ updateSlides();
+ }
+};
+
+/* Slide events */
+
+function triggerEnterEvent(no) {
+ var el = getSlideEl(no);
+ if (!el) {
+ return;
+ }
+
+ var onEnter = el.getAttribute('onslideenter');
+ if (onEnter) {
+ new Function(onEnter).call(el);
+ }
+
+ var evt = document.createEvent('Event');
+ evt.initEvent('slideenter', true, true);
+ evt.slideNumber = no + 1; // Make it readable
+
+ el.dispatchEvent(evt);
+};
+
+function triggerLeaveEvent(no) {
+ var el = getSlideEl(no);
+ if (!el) {
+ return;
+ }
+
+ var onLeave = el.getAttribute('onslideleave');
+ if (onLeave) {
+ new Function(onLeave).call(el);
+ }
+
+ var evt = document.createEvent('Event');
+ evt.initEvent('slideleave', true, true);
+ evt.slideNumber = no + 1; // Make it readable
+
+ el.dispatchEvent(evt);
+};
+
+/* Touch events */
+
+function handleTouchStart(event) {
+ if (event.touches.length == 1) {
+ touchDX = 0;
+ touchDY = 0;
+
+ touchStartX = event.touches[0].pageX;
+ touchStartY = event.touches[0].pageY;
+
+ document.body.addEventListener('touchmove', handleTouchMove, true);
+ document.body.addEventListener('touchend', handleTouchEnd, true);
+ }
+};
+
+function handleTouchMove(event) {
+ if (event.touches.length > 1) {
+ cancelTouch();
+ } else {
+ touchDX = event.touches[0].pageX - touchStartX;
+ touchDY = event.touches[0].pageY - touchStartY;
+ event.preventDefault();
+ }
+};
+
+function handleTouchEnd(event) {
+ var dx = Math.abs(touchDX);
+ var dy = Math.abs(touchDY);
+
+ if ((dx > PM_TOUCH_SENSITIVITY) && (dy < (dx * 2 / 3))) {
+ if (touchDX > 0) {
+ prevSlide();
+ } else {
+ nextSlide();
+ }
+ }
+
+ cancelTouch();
+};
+
+function cancelTouch() {
+ document.body.removeEventListener('touchmove', handleTouchMove, true);
+ document.body.removeEventListener('touchend', handleTouchEnd, true);
+};
+
+/* Preloading frames */
+
+function disableSlideFrames(no) {
+ var el = getSlideEl(no);
+ if (!el) {
+ return;
+ }
+
+ var frames = el.getElementsByTagName('iframe');
+ for (var i = 0, frame; frame = frames[i]; i++) {
+ disableFrame(frame);
+ }
+};
+
+function enableSlideFrames(no) {
+ var el = getSlideEl(no);
+ if (!el) {
+ return;
+ }
+
+ var frames = el.getElementsByTagName('iframe');
+ for (var i = 0, frame; frame = frames[i]; i++) {
+ enableFrame(frame);
+ }
+};
+
+function disableFrame(frame) {
+ frame.src = 'about:blank';
+};
+
+function enableFrame(frame) {
+ var src = frame._src;
+
+ if (frame.src != src && src != 'about:blank') {
+ frame.src = src;
+ }
+};
+
+function setupFrames() {
+ var frames = document.querySelectorAll('iframe');
+ for (var i = 0, frame; frame = frames[i]; i++) {
+ frame._src = frame.src;
+ disableFrame(frame);
+ }
+
+ enableSlideFrames(curSlide);
+ enableSlideFrames(curSlide + 1);
+ enableSlideFrames(curSlide + 2);
+};
+
+function setupInteraction() {
+ /* Clicking and tapping */
+
+ var el = document.createElement('div');
+ el.className = 'slide-area';
+ el.id = 'prev-slide-area';
+ el.addEventListener('click', prevSlide, false);
+ document.querySelector('section.slides').appendChild(el);
+
+ var el = document.createElement('div');
+ el.className = 'slide-area';
+ el.id = 'next-slide-area';
+ el.addEventListener('click', nextSlide, false);
+ document.querySelector('section.slides').appendChild(el);
+
+ /* Swiping */
+
+ document.body.addEventListener('touchstart', handleTouchStart, false);
+}
+
+/* Hash functions */
+
+function getCurSlideFromHash() {
+ var slideNo = parseInt(location.hash.substr(1));
+
+ if (slideNo) {
+ curSlide = slideNo - 1;
+ } else {
+ curSlide = 0;
+ }
+};
+
+function updateHash() {
+ location.replace('#' + (curSlide + 1));
+};
+
+/* Event listeners */
+
+function handleBodyKeyDown(event) {
+ // If we're in a code element, only handle pgup/down.
+ var inCode = event.target.classList.contains("code");
+
+ switch (event.keyCode) {
+ case 39: // right arrow
+ case 13: // Enter
+ case 32: // space
+ if (inCode) break;
+ case 34: // PgDn
+ nextSlide();
+ event.preventDefault();
+ break;
+
+ case 37: // left arrow
+ case 8: // Backspace
+ if (inCode) break;
+ case 33: // PgUp
+ prevSlide();
+ event.preventDefault();
+ break;
+
+ case 40: // down arrow
+ if (inCode) break;
+ nextSlide();
+ event.preventDefault();
+ break;
+
+ case 38: // up arrow
+ if (inCode) break;
+ prevSlide();
+ event.preventDefault();
+ break;
+ }
+};
+
+function addEventListeners() {
+ document.addEventListener('keydown', handleBodyKeyDown, false);
+};
+
+/* Initialization */
+
+function addFontStyle() {
+ var el = document.createElement('link');
+ el.rel = 'stylesheet';
+ el.type = 'text/css';
+ el.href = '//fonts.googleapis.com/css?family=' +
+ 'Open+Sans:regular,semibold,italic,italicsemibold|Droid+Sans+Mono';
+
+ document.body.appendChild(el);
+};
+
+function addGeneralStyle() {
+ var el = document.createElement('link');
+ el.rel = 'stylesheet';
+ el.type = 'text/css';
+ el.href = PERMANENT_URL_PREFIX + 'styles.css';
+ document.body.appendChild(el);
+
+ var el = document.createElement('meta');
+ el.name = 'viewport';
+ el.content = 'width=1100,height=750';
+ document.querySelector('head').appendChild(el);
+
+ var el = document.createElement('meta');
+ el.name = 'apple-mobile-web-app-capable';
+ el.content = 'yes';
+ document.querySelector('head').appendChild(el);
+};
+
+function addPrintStyle() {
+ var el = document.createElement('link');
+ el.rel = 'stylesheet';
+ el.type = 'text/css';
+ el.media = "print";
+ el.href = PERMANENT_URL_PREFIX + 'print.css';
+ document.body.appendChild(el);
+};
+
+function handleDomLoaded() {
+ slideEls = document.querySelectorAll('section.slides > article');
+
+ setupFrames();
+
+ addFontStyle();
+ addGeneralStyle();
+ addPrintStyle();
+ addEventListeners();
+
+ updateSlides();
+
+ setupInteraction();
+
+ document.body.classList.add('loaded');
+};
+
+function initialize() {
+ getCurSlideFromHash();
+
+ if (window['_DEBUG']) {
+ PERMANENT_URL_PREFIX = '../';
+ }
+
+ if (window['_DCL']) {
+ handleDomLoaded();
+ } else {
+ document.addEventListener('DOMContentLoaded', handleDomLoaded, false);
+ }
+}
+
+// If ?debug exists then load the script relative instead of absolute
+if (!window['_DEBUG'] && document.location.href.indexOf('?debug') !== -1) {
+ document.addEventListener('DOMContentLoaded', function() {
+ // Avoid missing the DomContentLoaded event
+ window['_DCL'] = true
+ }, false);
+
+ window['_DEBUG'] = true;
+ var script = document.createElement('script');
+ script.type = 'text/javascript';
+ script.src = '../slides.js';
+ var s = document.getElementsByTagName('script')[0];
+ s.parentNode.insertBefore(script, s);
+
+ // Remove this script
+ s.parentNode.removeChild(s);
+} else {
+ initialize();
+}
diff --git a/llgo/third_party/go.tools/cmd/present/static/styles.css b/llgo/third_party/go.tools/cmd/present/static/styles.css
new file mode 100644
index 0000000000000000000000000000000000000000..b3d829a2569d18b25d95dbcaac51cd3298e5c12c
--- /dev/null
+++ b/llgo/third_party/go.tools/cmd/present/static/styles.css
@@ -0,0 +1,461 @@
+/* Framework */
+
+html {
+ height: 100%;
+}
+
+body {
+ margin: 0;
+ padding: 0;
+
+ display: block !important;
+
+ height: 100%;
+ min-height: 740px;
+
+ overflow-x: hidden;
+ overflow-y: auto;
+
+ background: rgb(215, 215, 215);
+ background: -o-radial-gradient(rgb(240, 240, 240), rgb(190, 190, 190));
+ background: -moz-radial-gradient(rgb(240, 240, 240), rgb(190, 190, 190));
+ background: -webkit-radial-gradient(rgb(240, 240, 240), rgb(190, 190, 190));
+ background: -webkit-gradient(radial, 50% 50%, 0, 50% 50%, 500, from(rgb(240, 240, 240)), to(rgb(190, 190, 190)));
+
+ -webkit-font-smoothing: antialiased;
+}
+
+.slides {
+ width: 100%;
+ height: 100%;
+ left: 0;
+ top: 0;
+
+ position: absolute;
+
+ -webkit-transform: translate3d(0, 0, 0);
+}
+
+.slides > article {
+ display: block;
+
+ position: absolute;
+ overflow: hidden;
+
+ width: 900px;
+ height: 700px;
+
+ left: 50%;
+ top: 50%;
+
+ margin-left: -450px;
+ margin-top: -350px;
+
+ padding: 40px 60px;
+
+ box-sizing: border-box;
+ -o-box-sizing: border-box;
+ -moz-box-sizing: border-box;
+ -webkit-box-sizing: border-box;
+
+ border-radius: 10px;
+ -o-border-radius: 10px;
+ -moz-border-radius: 10px;
+ -webkit-border-radius: 10px;
+
+ background-color: white;
+
+ border: 1px solid rgba(0, 0, 0, .3);
+
+ transition: transform .3s ease-out;
+ -o-transition: -o-transform .3s ease-out;
+ -moz-transition: -moz-transform .3s ease-out;
+ -webkit-transition: -webkit-transform .3s ease-out;
+}
+.slides.layout-widescreen > article {
+ margin-left: -550px;
+ width: 1100px;
+}
+.slides.layout-faux-widescreen > article {
+ margin-left: -550px;
+ width: 1100px;
+
+ padding: 40px 160px;
+}
+
+.slides.layout-widescreen > article:not(.nobackground):not(.biglogo),
+.slides.layout-faux-widescreen > article:not(.nobackground):not(.biglogo) {
+ background-position-x: 0, 840px;
+}
+
+/* Clickable/tappable areas */
+
+.slide-area {
+ z-index: 1000;
+
+ position: absolute;
+ left: 0;
+ top: 0;
+ width: 150px;
+ height: 700px;
+
+ left: 50%;
+ top: 50%;
+
+ cursor: pointer;
+ margin-top: -350px;
+
+ tap-highlight-color: transparent;
+ -o-tap-highlight-color: transparent;
+ -moz-tap-highlight-color: transparent;
+ -webkit-tap-highlight-color: transparent;
+}
+#prev-slide-area {
+ margin-left: -550px;
+}
+#next-slide-area {
+ margin-left: 400px;
+}
+.slides.layout-widescreen #prev-slide-area,
+.slides.layout-faux-widescreen #prev-slide-area {
+ margin-left: -650px;
+}
+.slides.layout-widescreen #next-slide-area,
+.slides.layout-faux-widescreen #next-slide-area {
+ margin-left: 500px;
+}
+
+/* Slides */
+
+.slides > article {
+ display: none;
+}
+.slides > article.far-past {
+ display: block;
+ transform: translate(-2040px);
+ -o-transform: translate(-2040px);
+ -moz-transform: translate(-2040px);
+ -webkit-transform: translate3d(-2040px, 0, 0);
+}
+.slides > article.past {
+ display: block;
+ transform: translate(-1020px);
+ -o-transform: translate(-1020px);
+ -moz-transform: translate(-1020px);
+ -webkit-transform: translate3d(-1020px, 0, 0);
+}
+.slides > article.current {
+ display: block;
+ transform: translate(0);
+ -o-transform: translate(0);
+ -moz-transform: translate(0);
+ -webkit-transform: translate3d(0, 0, 0);
+}
+.slides > article.next {
+ display: block;
+ transform: translate(1020px);
+ -o-transform: translate(1020px);
+ -moz-transform: translate(1020px);
+ -webkit-transform: translate3d(1020px, 0, 0);
+}
+.slides > article.far-next {
+ display: block;
+ transform: translate(2040px);
+ -o-transform: translate(2040px);
+ -moz-transform: translate(2040px);
+ -webkit-transform: translate3d(2040px, 0, 0);
+}
+
+.slides.layout-widescreen > article.far-past,
+.slides.layout-faux-widescreen > article.far-past {
+ display: block;
+ transform: translate(-2260px);
+ -o-transform: translate(-2260px);
+ -moz-transform: translate(-2260px);
+ -webkit-transform: translate3d(-2260px, 0, 0);
+}
+.slides.layout-widescreen > article.past,
+.slides.layout-faux-widescreen > article.past {
+ display: block;
+ transform: translate(-1130px);
+ -o-transform: translate(-1130px);
+ -moz-transform: translate(-1130px);
+ -webkit-transform: translate3d(-1130px, 0, 0);
+}
+.slides.layout-widescreen > article.current,
+.slides.layout-faux-widescreen > article.current {
+ display: block;
+ transform: translate(0);
+ -o-transform: translate(0);
+ -moz-transform: translate(0);
+ -webkit-transform: translate3d(0, 0, 0);
+}
+.slides.layout-widescreen > article.next,
+.slides.layout-faux-widescreen > article.next {
+ display: block;
+ transform: translate(1130px);
+ -o-transform: translate(1130px);
+ -moz-transform: translate(1130px);
+ -webkit-transform: translate3d(1130px, 0, 0);
+}
+.slides.layout-widescreen > article.far-next,
+.slides.layout-faux-widescreen > article.far-next {
+ display: block;
+ transform: translate(2260px);
+ -o-transform: translate(2260px);
+ -moz-transform: translate(2260px);
+ -webkit-transform: translate3d(2260px, 0, 0);
+}
+
+/* Styles for slides */
+
+.slides > article {
+ font-family: 'Open Sans', Arial, sans-serif;
+
+ color: black;
+ text-shadow: 0 1px 1px rgba(0, 0, 0, .1);
+
+ font-size: 26px;
+ line-height: 36px;
+
+ letter-spacing: -1px;
+}
+
+b {
+ font-weight: 600;
+}
+
+a {
+ color: rgb(0, 102, 204);
+ text-decoration: none;
+}
+a:visited {
+ color: rgba(0, 102, 204, .75);
+}
+a:hover {
+ color: black;
+}
+
+p {
+ margin: 0;
+ padding: 0;
+
+ margin-top: 20px;
+}
+p:first-child {
+ margin-top: 0;
+}
+
+h1 {
+ font-size: 60px;
+ line-height: 60px;
+
+ padding: 0;
+ margin: 0;
+ margin-top: 200px;
+ margin-bottom: 5px;
+ padding-right: 40px;
+
+ font-weight: 600;
+
+ letter-spacing: -3px;
+
+ color: rgb(51, 51, 51);
+}
+
+h2 {
+ font-size: 45px;
+ line-height: 45px;
+
+ position: absolute;
+ bottom: 150px;
+
+ padding: 0;
+ margin: 0;
+ padding-right: 40px;
+
+ font-weight: 600;
+
+ letter-spacing: -2px;
+
+ color: rgb(51, 51, 51);
+}
+
+h3 {
+ font-size: 30px;
+ line-height: 36px;
+
+ padding: 0;
+ margin: 0;
+ padding-right: 40px;
+
+ font-weight: 600;
+
+ letter-spacing: -1px;
+
+ color: rgb(51, 51, 51);
+}
+
+ul {
+ margin: 0;
+ padding: 0;
+ margin-top: 20px;
+ margin-left: 1.5em;
+}
+li {
+ padding: 0;
+ margin: 0 0 .5em 0;
+}
+
+div.code {
+ padding: 5px 10px;
+ margin-top: 20px;
+ margin-bottom: 20px;
+ overflow: hidden;
+
+ background: rgb(240, 240, 240);
+ border: 1px solid rgb(224, 224, 224);
+}
+pre {
+ margin: 0;
+ padding: 0;
+
+ font-family: 'Droid Sans Mono', 'Courier New', monospace;
+ font-size: 18px;
+ line-height: 24px;
+ letter-spacing: -1px;
+
+ color: black;
+}
+
+code {
+ font-size: 95%;
+ font-family: 'Droid Sans Mono', 'Courier New', monospace;
+
+ color: black;
+}
+
+article > .image {
+ text-align: center;
+ margin-top: 40px;
+}
+
+table {
+ width: 100%;
+ border-collapse: collapse;
+ margin-top: 40px;
+}
+th {
+ font-weight: 600;
+ text-align: left;
+}
+td,
+th {
+ border: 1px solid rgb(224, 224, 224);
+ padding: 5px 10px;
+ vertical-align: top;
+}
+
+p.link {
+ margin-left: 20px;
+}
+
+/* Code */
+div.code {
+ outline: 0px solid transparent;
+}
+div.playground {
+ position: relative;
+}
+div.output {
+ position: absolute;
+ left: 50%;
+ top: 50%;
+ right: 40px;
+ bottom: 40px;
+ background: #202020;
+ padding: 5px 10px;
+ z-index: 2;
+
+ border-radius: 10px;
+ -o-border-radius: 10px;
+ -moz-border-radius: 10px;
+ -webkit-border-radius: 10px;
+
+}
+div.output pre {
+ margin: 0;
+ padding: 0;
+ background: none;
+ border: none;
+ width: 100%;
+ height: 100%;
+ overflow: auto;
+}
+div.output .stdout, div.output pre {
+ color: #e6e6e6;
+}
+div.output .stderr, div.output .error {
+ color: rgb(255, 200, 200);
+}
+div.output .system, div.output .exit {
+ color: rgb(255, 230, 120)
+}
+.buttons {
+ position: relative;
+ float: right;
+ top: -60px;
+ right: 10px;
+}
+div.output .buttons {
+ position: absolute;
+ float: none;
+ top: auto;
+ right: 5px;
+ bottom: 5px;
+}
+
+/* Presenter details */
+.presenter {
+ margin-top: 20px;
+}
+.presenter p,
+.presenter .link {
+ margin: 0;
+ font-size: 28px;
+ line-height: 1.2em;
+}
+
+/* Output resize details */
+.ui-resizable-handle {
+ position: absolute;
+}
+.ui-resizable-n {
+ cursor: n-resize;
+ height: 7px;
+ width: 100%;
+ top: -5px;
+ left: 0;
+}
+.ui-resizable-w {
+ cursor: w-resize;
+ width: 7px;
+ left: -5px;
+ top: 0;
+ height: 100%;
+}
+.ui-resizable-nw {
+ cursor: nw-resize;
+ width: 9px;
+ height: 9px;
+ left: -5px;
+ top: -5px;
+}
+iframe {
+ border: none;
+}
+figcaption {
+ color: #666;
+ text-align: center;
+ font-size: 0.75em;
+}
\ No newline at end of file
diff --git a/llgo/third_party/go.tools/cmd/present/templates/action.tmpl b/llgo/third_party/go.tools/cmd/present/templates/action.tmpl
new file mode 100644
index 0000000000000000000000000000000000000000..2893058d024d47fd3b7b582390a935696ba103b3
--- /dev/null
+++ b/llgo/third_party/go.tools/cmd/present/templates/action.tmpl
@@ -0,0 +1,48 @@
+{/*
+This is the action template.
+It determines how the formatting actions are rendered.
+*/}
+
+{{define "section"}}
+ {{.FormattedNumber}} {{.Title}}
+ {{range .Elem}}{{elem $.Template .}}{{end}}
+{{end}}
+
+{{define "list"}}
+
+ {{range .Bullet}}
+ {{style .}}
+ {{end}}
+
+{{end}}
+
+{{define "text"}}
+ {{if .Pre}}
+ {{range .Lines}}{{.}}{{end}}
+ {{else}}
+
+ {{range $i, $l := .Lines}}{{if $i}}{{template "newline"}}
+ {{end}}{{style $l}}{{end}}
+
+ {{end}}
+{{end}}
+
+{{define "code"}}
+ {{.Text}}
+{{end}}
+
+{{define "image"}}
+
+
+
+{{end}}
+
+{{define "iframe"}}
+
+{{end}}
+
+{{define "link"}}{{style .Label}}
+{{end}}
+
+{{define "html"}}{{.HTML}}{{end}}
+
+{{define "caption"}}{{style .Text}} {{end}}
\ No newline at end of file
diff --git a/llgo/third_party/go.tools/cmd/present/templates/article.tmpl b/llgo/third_party/go.tools/cmd/present/templates/article.tmpl
new file mode 100644
index 0000000000000000000000000000000000000000..40d1c936420e1948ece9e5d8194c0f5b387f3e69
--- /dev/null
+++ b/llgo/third_party/go.tools/cmd/present/templates/article.tmpl
@@ -0,0 +1,58 @@
+{/* This is the article template. It defines how articles are formatted. */}
+
+{{define "root"}}
+
+
+
+ {{.Title}}
+
+
+
+
+
+
+
+
+ {{.Title}}
+ {{with .Subtitle}}{{.}}{{end}}
+
+
+
+
+
+ {{with .Sections}}
+
+ {{template "TOC" .}}
+
+ {{end}}
+
+ {{range .Sections}}
+ {{elem $.Template .}}
+ {{end}}{{/* of Section block */}}
+
+ {{if .Authors}}
+
+ Authors
+ {{range .Authors}}
+
+ {{range .Elem}}{{elem $.Template .}}{{end}}
+
+ {{end}}
+ {{end}}
+
+
+
+
+
+{{end}}
+
+{{define "TOC"}}
+
+ {{range .}}
+ {{.Title}}
+ {{with .Sections}}{{template "TOC" .}}{{end}}
+ {{end}}
+
+{{end}}
+
+{{define "newline"}}
+{{/* No automatic line break. Paragraphs are free-form. */}}
+{{end}}
diff --git a/llgo/third_party/go.tools/cmd/present/templates/dir.tmpl b/llgo/third_party/go.tools/cmd/present/templates/dir.tmpl
new file mode 100644
index 0000000000000000000000000000000000000000..aa838683f2f24cd14219ad6d562a15ae1363b81f
--- /dev/null
+++ b/llgo/third_party/go.tools/cmd/present/templates/dir.tmpl
@@ -0,0 +1,81 @@
+
+
+
+
+ Talks - The Go Programming Language
+
+
+
+
+
+
+
+
+
+
+ Go talks
+
+ {{with .Path}}
+ {{.}} {{end}}
+
+ {{with .Articles}}
+
+ Articles:
+
+ {{range .}}
+ {{.Name}} : {{.Title}}
+ {{end}}
+
+ {{end}}
+
+ {{with .Slides}}
+
+ Slide decks:
+
+ {{range .}}
+ {{.Name}} : {{.Title}}
+ {{end}}
+
+ {{end}}
+
+ {{with .Other}}
+
+ Files:
+
+ {{range .}}
+ {{.Name}}
+ {{end}}
+
+ {{end}}
+
+ {{with .Dirs}}
+
+ Sub-directories:
+
+ {{range .}}
+ {{.Name}}
+ {{end}}
+
+ {{end}}
+
+
+
+
+
+
+
diff --git a/llgo/third_party/go.tools/cmd/present/templates/slides.tmpl b/llgo/third_party/go.tools/cmd/present/templates/slides.tmpl
new file mode 100644
index 0000000000000000000000000000000000000000..d2abfa188119b3a181e23e5e5506d5c6230022df
--- /dev/null
+++ b/llgo/third_party/go.tools/cmd/present/templates/slides.tmpl
@@ -0,0 +1,58 @@
+{/* This is the slide template. It defines how presentations are formatted. */}
+
+{{define "root"}}
+
+
+
+ {{.Title}}
+
+
+
+
+
+
+
+
+
+ {{.Title}}
+ {{with .Subtitle}}{{.}} {{end}}
+ {{if not .Time.IsZero}}{{.Time.Format "2 January 2006"}} {{end}}
+ {{range .Authors}}
+
+ {{range .TextElem}}{{elem $.Template .}}{{end}}
+
+ {{end}}
+
+
+ {{range $i, $s := .Sections}}
+
+
+ {{if $s.Elem}}
+ {{$s.Title}}
+ {{range $s.Elem}}{{elem $.Template .}}{{end}}
+ {{else}}
+ {{$s.Title}}
+ {{end}}
+
+
+ {{end}}{{/* of Slide block */}}
+
+
+ Thank you
+ {{range .Authors}}
+
+ {{range .Elem}}{{elem $.Template .}}{{end}}
+
+ {{end}}
+
+
+
+ {{if .PlayEnabled}}
+
+ {{end}}
+
+{{end}}
+
+{{define "newline"}}
+
+{{end}}
diff --git a/llgo/third_party/go.tools/cmd/ssadump/main.go b/llgo/third_party/go.tools/cmd/ssadump/main.go
new file mode 100644
index 0000000000000000000000000000000000000000..1b02788aed1fd4a2a464509344f077484d92721a
--- /dev/null
+++ b/llgo/third_party/go.tools/cmd/ssadump/main.go
@@ -0,0 +1,213 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// ssadump: a tool for displaying and interpreting the SSA form of Go programs.
+package main
+
+import (
+ "flag"
+ "fmt"
+ "go/build"
+ "os"
+ "runtime"
+ "runtime/pprof"
+
+ "llvm.org/llgo/third_party/go.tools/go/loader"
+ "llvm.org/llgo/third_party/go.tools/go/ssa"
+ "llvm.org/llgo/third_party/go.tools/go/ssa/interp"
+ "llvm.org/llgo/third_party/go.tools/go/types"
+)
+
+var buildFlag = flag.String("build", "", `Options controlling the SSA builder.
+The value is a sequence of zero or more of these letters:
+C perform sanity [C]hecking of the SSA form.
+D include [D]ebug info for every function.
+P print [P]ackage inventory.
+F print [F]unction SSA code.
+S log [S]ource locations as SSA builder progresses.
+G use binary object files from gc to provide imports (no code).
+L build distinct packages seria[L]ly instead of in parallel.
+N build [N]aive SSA form: don't replace local loads/stores with registers.
+I build bare [I]nit functions: no init guards or calls to dependent inits.
+`)
+
+var testFlag = flag.Bool("test", false, "Loads test code (*_test.go) for imported packages.")
+
+var runFlag = flag.Bool("run", false, "Invokes the SSA interpreter on the program.")
+
+var interpFlag = flag.String("interp", "", `Options controlling the SSA test interpreter.
+The value is a sequence of zero or more of these letters:
+R disable [R]ecover() from panic; show interpreter crash instead.
+T [T]race execution of the program. Best for single-threaded programs!
+`)
+
+const usage = `SSA builder and interpreter.
+Usage: ssadump [<flag> ...] <args> ...
+Use -help flag to display options.
+
+Examples:
+% ssadump -build=FPG hello.go # quickly dump SSA form of a single package
+% ssadump -run -interp=T hello.go # interpret a program, with tracing
+% ssadump -run -test unicode -- -test.v # interpret the unicode package's tests, verbosely
+` + loader.FromArgsUsage +
+ `
+When -run is specified, ssadump will run the program.
+The entry point depends on the -test flag:
+if clear, it runs the first package named main.
+if set, it runs the tests of each package.
+`
+
+var cpuprofile = flag.String("cpuprofile", "", "write cpu profile to file")
+
+func init() {
+ // If $GOMAXPROCS isn't set, use the full capacity of the machine.
+ // For small machines, use at least 4 threads.
+ if os.Getenv("GOMAXPROCS") == "" {
+ n := runtime.NumCPU()
+ if n < 4 {
+ n = 4
+ }
+ runtime.GOMAXPROCS(n)
+ }
+}
+
+func main() {
+ if err := doMain(); err != nil {
+ fmt.Fprintf(os.Stderr, "ssadump: %s\n", err)
+ os.Exit(1)
+ }
+}
+
+func doMain() error {
+ flag.Parse()
+ args := flag.Args()
+
+ conf := loader.Config{
+ Build: &build.Default,
+ SourceImports: true,
+ }
+ // TODO(adonovan): make go/types choose its default Sizes from
+ // build.Default or a specified *build.Context.
+ var wordSize int64 = 8
+ switch conf.Build.GOARCH {
+ case "386", "arm":
+ wordSize = 4
+ }
+ conf.TypeChecker.Sizes = &types.StdSizes{
+ MaxAlign: 8,
+ WordSize: wordSize,
+ }
+
+ var mode ssa.BuilderMode
+ for _, c := range *buildFlag {
+ switch c {
+ case 'D':
+ mode |= ssa.GlobalDebug
+ case 'P':
+ mode |= ssa.PrintPackages
+ case 'F':
+ mode |= ssa.PrintFunctions
+ case 'S':
+ mode |= ssa.LogSource | ssa.BuildSerially
+ case 'C':
+ mode |= ssa.SanityCheckFunctions
+ case 'N':
+ mode |= ssa.NaiveForm
+ case 'G':
+ conf.SourceImports = false
+ case 'L':
+ mode |= ssa.BuildSerially
+ case 'I':
+ mode |= ssa.BareInits
+ default:
+ return fmt.Errorf("unknown -build option: '%c'", c)
+ }
+ }
+
+ var interpMode interp.Mode
+ for _, c := range *interpFlag {
+ switch c {
+ case 'T':
+ interpMode |= interp.EnableTracing
+ case 'R':
+ interpMode |= interp.DisableRecover
+ default:
+ return fmt.Errorf("unknown -interp option: '%c'", c)
+ }
+ }
+
+ if len(args) == 0 {
+ fmt.Fprint(os.Stderr, usage)
+ os.Exit(1)
+ }
+
+ // Profiling support.
+ if *cpuprofile != "" {
+ f, err := os.Create(*cpuprofile)
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(1)
+ }
+ pprof.StartCPUProfile(f)
+ defer pprof.StopCPUProfile()
+ }
+
+ // Use the initial packages from the command line.
+ args, err := conf.FromArgs(args, *testFlag)
+ if err != nil {
+ return err
+ }
+
+ // The interpreter needs the runtime package.
+ if *runFlag {
+ conf.Import("runtime")
+ }
+
+ // Load, parse and type-check the whole program.
+ iprog, err := conf.Load()
+ if err != nil {
+ return err
+ }
+
+ // Create and build SSA-form program representation.
+ prog := ssa.Create(iprog, mode)
+ prog.BuildAll()
+
+ // Run the interpreter.
+ if *runFlag {
+ var main *ssa.Package
+ pkgs := prog.AllPackages()
+ if *testFlag {
+ // If -test, run all packages' tests.
+ if len(pkgs) > 0 {
+ main = prog.CreateTestMainPackage(pkgs...)
+ }
+ if main == nil {
+ return fmt.Errorf("no tests")
+ }
+ } else {
+ // Otherwise, run main.main.
+ for _, pkg := range pkgs {
+ if pkg.Object.Name() == "main" {
+ main = pkg
+ if main.Func("main") == nil {
+ return fmt.Errorf("no func main() in main package")
+ }
+ break
+ }
+ }
+ if main == nil {
+ return fmt.Errorf("no main package")
+ }
+ }
+
+ if runtime.GOARCH != build.Default.GOARCH {
+ return fmt.Errorf("cross-interpretation is not yet supported (target has GOARCH %s, interpreter has %s)",
+ build.Default.GOARCH, runtime.GOARCH)
+ }
+
+ interp.Interpret(main, interpMode, conf.TypeChecker.Sizes, main.Object.Path(), args)
+ }
+ return nil
+}
diff --git a/llgo/third_party/go.tools/cmd/stringer/endtoend_test.go b/llgo/third_party/go.tools/cmd/stringer/endtoend_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..ae2cc0a5a8c2bbe3dfa5e70252595727afbc8f4a
--- /dev/null
+++ b/llgo/third_party/go.tools/cmd/stringer/endtoend_test.go
@@ -0,0 +1,99 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+ "testing"
+)
+
+// This file contains a test that compiles and runs each program in testdata
+// after generating the string method for its type. The rule is that for testdata/x.go
+// we run stringer -type X and then compile and run the program. The resulting
+// binary panics if the String method for X is not correct, including for error cases.
+
+func TestEndToEnd(t *testing.T) {
+ dir, err := ioutil.TempDir("", "stringer")
+ defer os.RemoveAll(dir)
+ // Create stringer in temporary directory.
+ stringer := filepath.Join(dir, "stringer.exe")
+ err = run("go", "build", "-o", stringer, "stringer.go")
+ if err != nil {
+ t.Fatalf("building stringer: %s", err)
+ }
+ // Read the testdata directory.
+ fd, err := os.Open("testdata")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer fd.Close()
+ names, err := fd.Readdirnames(-1)
+ if err != nil {
+ t.Fatalf("Readdirnames: %s", err)
+ }
+ // Generate, compile, and run the test programs.
+ for _, name := range names {
+ if !strings.HasSuffix(name, ".go") {
+ t.Errorf("%s is not a Go file", name)
+ continue
+ }
+ // Names are known to be ASCII and long enough.
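+ // For example, "day.go" yields the type name "Day".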
+ typeName := fmt.Sprintf("%c%s", name[0]+'A'-'a', name[1:len(name)-len(".go")])
+ stringerCompileAndRun(t, dir, stringer, typeName, name)
+ }
+}
+
+// stringerCompileAndRun runs stringer for the named file and compiles and
+// runs the target binary in directory dir. That binary will panic if the String method is incorrect.
+func stringerCompileAndRun(t *testing.T, dir, stringer, typeName, fileName string) {
+ t.Logf("run: %s %s\n", fileName, typeName)
+ source := filepath.Join(dir, fileName)
+ err := copy(source, filepath.Join("testdata", fileName))
+ if err != nil {
+ t.Fatalf("copying file to temporary directory: %s", err)
+ }
+ stringSource := filepath.Join(dir, typeName+"_string.go")
+ // Run stringer in temporary directory.
+ err = run(stringer, "-type", typeName, "-output", stringSource, source)
+ if err != nil {
+ t.Fatal(err)
+ }
+ // Run the binary in the temporary directory.
+ err = run("go", "run", stringSource, source)
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+// copy copies the from file to the to file.
+func copy(to, from string) error {
+ toFd, err := os.Create(to)
+ if err != nil {
+ return err
+ }
+ defer toFd.Close()
+ fromFd, err := os.Open(from)
+ if err != nil {
+ return err
+ }
+ defer fromFd.Close()
+ _, err = io.Copy(toFd, fromFd)
+ return err
+}
+
+// run runs a single command and returns an error if it does not succeed.
+// os/exec should have this function, to be honest.
+func run(name string, arg ...string) error {
+ cmd := exec.Command(name, arg...)
+ cmd.Stdout = os.Stdout
+ cmd.Stderr = os.Stderr
+ return cmd.Run()
+}
diff --git a/llgo/third_party/go.tools/cmd/stringer/golden_test.go b/llgo/third_party/go.tools/cmd/stringer/golden_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..f0db09f02557c44319b55354bb19608e6f23487f
--- /dev/null
+++ b/llgo/third_party/go.tools/cmd/stringer/golden_test.go
@@ -0,0 +1,289 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains simple golden tests for various examples.
+// Besides validating the results when the implementation changes,
+// it provides a way to look at the generated code without having
+// to execute the print statements in one's head.
+
+package main
+
+import (
+ "strings"
+ "testing"
+)
+
+// Golden represents a test case.
+type Golden struct {
+ name string
+ input string // input; the package clause is provided when running the test.
+ output string // expected output.
+}
+
+var golden = []Golden{
+ {"day", day_in, day_out},
+ {"offset", offset_in, offset_out},
+ {"gap", gap_in, gap_out},
+ {"num", num_in, num_out},
+ {"unum", unum_in, unum_out},
+ {"prime", prime_in, prime_out},
+}
+
+// Each example starts with "type XXX [u]int", with a single space separating them.
+
+// Simple test: enumeration of type int starting at 0.
+const day_in = `type Day int
+const (
+ Monday Day = iota
+ Tuesday
+ Wednesday
+ Thursday
+ Friday
+ Saturday
+ Sunday
+)
+`
+
+const day_out = `
+const _Day_name = "MondayTuesdayWednesdayThursdayFridaySaturdaySunday"
+
+var _Day_index = [...]uint8{6, 13, 22, 30, 36, 44, 50}
+
+func (i Day) String() string {
+ if i < 0 || i >= Day(len(_Day_index)) {
+ return fmt.Sprintf("Day(%d)", i)
+ }
+ hi := _Day_index[i]
+ lo := uint8(0)
+ if i > 0 {
+ lo = _Day_index[i-1]
+ }
+ return _Day_name[lo:hi]
+}
+`
+
+// Enumeration with an offset.
+// Also includes a duplicate.
+const offset_in = `type Number int
+const (
+ _ Number = iota
+ One
+ Two
+ Three
+ AnotherOne = One // Duplicate; note that AnotherOne doesn't appear below.
+)
+`
+
+const offset_out = `
+const _Number_name = "OneTwoThree"
+
+var _Number_index = [...]uint8{3, 6, 11}
+
+func (i Number) String() string {
+ i -= 1
+ if i < 0 || i >= Number(len(_Number_index)) {
+ return fmt.Sprintf("Number(%d)", i+1)
+ }
+ hi := _Number_index[i]
+ lo := uint8(0)
+ if i > 0 {
+ lo = _Number_index[i-1]
+ }
+ return _Number_name[lo:hi]
+}
+`
+
+// Gaps and an offset.
+const gap_in = `type Gap int
+const (
+ Two Gap = 2
+ Three Gap = 3
+ Five Gap = 5
+ Six Gap = 6
+ Seven Gap = 7
+ Eight Gap = 8
+ Nine Gap = 9
+ Eleven Gap = 11
+)
+`
+
+const gap_out = `
+const (
+ _Gap_name_0 = "TwoThree"
+ _Gap_name_1 = "FiveSixSevenEightNine"
+ _Gap_name_2 = "Eleven"
+)
+
+var (
+ _Gap_index_0 = [...]uint8{3, 8}
+ _Gap_index_1 = [...]uint8{4, 7, 12, 17, 21}
+ _Gap_index_2 = [...]uint8{6}
+)
+
+func (i Gap) String() string {
+ switch {
+ case 2 <= i && i <= 3:
+ i -= 2
+ lo := uint8(0)
+ if i > 0 {
+ lo = _Gap_index_0[i-1]
+ }
+ return _Gap_name_0[lo:_Gap_index_0[i]]
+ case 5 <= i && i <= 9:
+ i -= 5
+ lo := uint8(0)
+ if i > 0 {
+ lo = _Gap_index_1[i-1]
+ }
+ return _Gap_name_1[lo:_Gap_index_1[i]]
+ case i == 11:
+ return _Gap_name_2
+ default:
+ return fmt.Sprintf("Gap(%d)", i)
+ }
+}
+`
+
+// Signed integers spanning zero.
+const num_in = `type Num int
+const (
+ m_2 Num = -2 + iota
+ m_1
+ m0
+ m1
+ m2
+)
+`
+
+const num_out = `
+const _Num_name = "m_2m_1m0m1m2"
+
+var _Num_index = [...]uint8{3, 6, 8, 10, 12}
+
+func (i Num) String() string {
+ i -= -2
+ if i < 0 || i >= Num(len(_Num_index)) {
+ return fmt.Sprintf("Num(%d)", i+-2)
+ }
+ hi := _Num_index[i]
+ lo := uint8(0)
+ if i > 0 {
+ lo = _Num_index[i-1]
+ }
+ return _Num_name[lo:hi]
+}
+`
+
+// Unsigned integers spanning zero.
+const unum_in = `type Unum uint
+const (
+ m_2 Unum = iota + 253
+ m_1
+)
+
+const (
+ m0 Unum = iota
+ m1
+ m2
+)
+`
+
+const unum_out = `
+const (
+ _Unum_name_0 = "m0m1m2"
+ _Unum_name_1 = "m_2m_1"
+)
+
+var (
+ _Unum_index_0 = [...]uint8{2, 4, 6}
+ _Unum_index_1 = [...]uint8{3, 6}
+)
+
+func (i Unum) String() string {
+ switch {
+ case 0 <= i && i <= 2:
+ lo := uint8(0)
+ if i > 0 {
+ lo = _Unum_index_0[i-1]
+ }
+ return _Unum_name_0[lo:_Unum_index_0[i]]
+ case 253 <= i && i <= 254:
+ i -= 253
+ lo := uint8(0)
+ if i > 0 {
+ lo = _Unum_index_1[i-1]
+ }
+ return _Unum_name_1[lo:_Unum_index_1[i]]
+ default:
+ return fmt.Sprintf("Unum(%d)", i)
+ }
+}
+`
+
+// Enough gaps to trigger a map implementation of the method.
+// Also includes a duplicate to test that it doesn't cause problems.
+const prime_in = `type Prime int
+const (
+ p2 Prime = 2
+ p3 Prime = 3
+ p5 Prime = 5
+ p7 Prime = 7
+ p77 Prime = 7 // Duplicate; note that p77 doesn't appear below.
+ p11 Prime = 11
+ p13 Prime = 13
+ p17 Prime = 17
+ p19 Prime = 19
+ p23 Prime = 23
+ p29 Prime = 29
+ p37 Prime = 31
+ p41 Prime = 41
+ p43 Prime = 43
+)
+`
+
+const prime_out = `
+const _Prime_name = "p2p3p5p7p11p13p17p19p23p29p37p41p43"
+
+var _Prime_map = map[Prime]string{
+ 2: _Prime_name[0:2],
+ 3: _Prime_name[2:4],
+ 5: _Prime_name[4:6],
+ 7: _Prime_name[6:8],
+ 11: _Prime_name[8:11],
+ 13: _Prime_name[11:14],
+ 17: _Prime_name[14:17],
+ 19: _Prime_name[17:20],
+ 23: _Prime_name[20:23],
+ 29: _Prime_name[23:26],
+ 31: _Prime_name[26:29],
+ 41: _Prime_name[29:32],
+ 43: _Prime_name[32:35],
+}
+
+func (i Prime) String() string {
+ if str, ok := _Prime_map[i]; ok {
+ return str
+ }
+ return fmt.Sprintf("Prime(%d)", i)
+}
+`
+
+func TestGolden(t *testing.T) {
+ for _, test := range golden {
+ var g Generator
+ input := "package test\n" + test.input
+ file := test.name + ".go"
+ g.parsePackage(".", []string{file}, input)
+ // Extract the name and type of the constant from the first line.
+ tokens := strings.SplitN(test.input, " ", 3)
+ if len(tokens) != 3 {
+ t.Fatalf("%s: need type declaration on first line", test.name)
+ }
+ g.generate(tokens[1])
+ got := string(g.format())
+ if got != test.output {
+ t.Errorf("%s: got\n====\n%s====\nexpected\n====%s", test.name, got, test.output)
+ }
+ }
+}
diff --git a/llgo/third_party/go.tools/cmd/stringer/stringer.go b/llgo/third_party/go.tools/cmd/stringer/stringer.go
new file mode 100644
index 0000000000000000000000000000000000000000..760b1b1cfedc398ea252644d56c15cdf61c6db5e
--- /dev/null
+++ b/llgo/third_party/go.tools/cmd/stringer/stringer.go
@@ -0,0 +1,652 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Stringer is a tool to automate the creation of methods that satisfy the fmt.Stringer
+// interface. Given the name of a (signed or unsigned) integer type T that has constants
+// defined, stringer will create a new self-contained Go source file implementing
+// func (t T) String() string
+// The file is created in the same package and directory as the package that defines T.
+// It has helpful defaults designed for use with go generate.
+//
+// Stringer works best with constants that are consecutive values such as created using iota,
+// but creates good code regardless. In the future it might also provide custom support for
+// constant sets that are bit patterns.
+//
+// For example, given this snippet,
+//
+// package painkiller
+//
+// type Pill int
+//
+// const (
+// Placebo Pill = iota
+// Aspirin
+// Ibuprofen
+// Paracetamol
+// Acetaminophen = Paracetamol
+// )
+//
+// running this command
+//
+// stringer -type=Pill
+//
+// in the same directory will create the file pill_string.go, in package painkiller,
+// containing a definition of
+//
+// func (Pill) String() string
+//
+// That method will translate the value of a Pill constant to the string representation
+// of the respective constant name, so that the call fmt.Print(painkiller.Aspirin) will
+// print the string "Aspirin".
+//
+// Typically this process would be run using go generate, like this:
+//
+// //go:generate stringer -type=Pill
+//
+// If multiple constants have the same value, the lexically first matching name will
+// be used (in the example, Acetaminophen will print as "Paracetamol").
+//
+// With no arguments, it processes the package in the current directory.
+// Otherwise, the arguments must name a single directory holding a Go package
+// or a set of Go source files that represent a single Go package.
+//
+// The -type flag accepts a comma-separated list of types so a single run can
+// generate methods for multiple types. The default output file is t_string.go,
+// where t is the lower-cased name of the first type listed. It can be overridden
+// with the -output flag.
+//
+package main
+
+import (
+ "bytes"
+ "flag"
+ "fmt"
+ "go/ast"
+ "go/build"
+ "go/format"
+ "go/parser"
+ "go/token"
+ "io/ioutil"
+ "log"
+ "os"
+ "path/filepath"
+ "sort"
+ "strings"
+
+ "llvm.org/llgo/third_party/go.tools/go/exact"
+ "llvm.org/llgo/third_party/go.tools/go/types"
+
+ _ "llvm.org/llgo/third_party/go.tools/go/gcimporter"
+)
+
+var (
+ typeNames = flag.String("type", "", "comma-separated list of type names; must be set")
+ output = flag.String("output", "", "output file name; default srcdir/<type>_string.go")
+)
+
+// Usage is a replacement usage function for the flags package.
+func Usage() {
+ fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0])
+ fmt.Fprintf(os.Stderr, "\tstringer [flags] -type T [directory]\n")
+ fmt.Fprintf(os.Stderr, "\tstringer [flags] -type T files... # Must be a single package\n")
+ fmt.Fprintf(os.Stderr, "For more information, see:\n")
+ fmt.Fprintf(os.Stderr, "\thttp://godoc.org/golang.org/x/tools/cmd/stringer\n")
+ fmt.Fprintf(os.Stderr, "Flags:\n")
+ flag.PrintDefaults()
+ os.Exit(2)
+}
+
+func main() {
+ log.SetFlags(0)
+ log.SetPrefix("stringer: ")
+ flag.Usage = Usage
+ flag.Parse()
+ if len(*typeNames) == 0 {
+ flag.Usage()
+ os.Exit(2)
+ }
+ types := strings.Split(*typeNames, ",")
+
+ // We accept either one directory or a list of files. Which do we have?
+ args := flag.Args()
+ if len(args) == 0 {
+ // Default: process whole package in current directory.
+ args = []string{"."}
+ }
+
+ // Parse the package once.
+ var (
+ dir string
+ g Generator
+ )
+ if len(args) == 1 && isDirectory(args[0]) {
+ dir = args[0]
+ g.parsePackageDir(args[0])
+ } else {
+ dir = filepath.Dir(args[0])
+ g.parsePackageFiles(args)
+ }
+
+ // Print the header and package clause.
+ g.Printf("// generated by stringer %s; DO NOT EDIT\n", strings.Join(os.Args[1:], " "))
+ g.Printf("\n")
+ g.Printf("package %s", g.pkg.name)
+ g.Printf("\n")
+ g.Printf("import \"fmt\"\n") // Used by all methods.
+
+ // Run generate for each type.
+ for _, typeName := range types {
+ g.generate(typeName)
+ }
+
+ // Format the output.
+ src := g.format()
+
+ // Write to file.
+ outputName := *output
+ if outputName == "" {
+ baseName := fmt.Sprintf("%s_string.go", types[0])
+ outputName = filepath.Join(dir, strings.ToLower(baseName))
+ }
+ err := ioutil.WriteFile(outputName, src, 0644)
+ if err != nil {
+ log.Fatalf("writing output: %s", err)
+ }
+}
+
+// isDirectory reports whether the named file is a directory.
+func isDirectory(name string) bool {
+ info, err := os.Stat(name)
+ if err != nil {
+ log.Fatal(err)
+ }
+ return info.IsDir()
+}
+
+// Generator holds the state of the analysis. Primarily used to buffer
+// the output for format.Source.
+type Generator struct {
+ buf bytes.Buffer // Accumulated output.
+ pkg *Package // Package we are scanning.
+}
+
+func (g *Generator) Printf(format string, args ...interface{}) {
+ fmt.Fprintf(&g.buf, format, args...)
+}
+
+// File holds a single parsed file and associated data.
+type File struct {
+ pkg *Package // Package to which this file belongs.
+ file *ast.File // Parsed AST.
+ // These fields are reset for each type being generated.
+ typeName string // Name of the constant type.
+ values []Value // Accumulator for constant values of that type.
+}
+
+type Package struct {
+ dir string
+ name string
+ defs map[*ast.Ident]types.Object
+ files []*File
+ typesPkg *types.Package
+}
+
+// parsePackageDir parses the package residing in the directory.
+func (g *Generator) parsePackageDir(directory string) {
+ pkg, err := build.Default.ImportDir(directory, 0)
+ if err != nil {
+ log.Fatalf("cannot process directory %s: %s", directory, err)
+ }
+ var names []string
+ names = append(names, pkg.GoFiles...)
+ names = append(names, pkg.CgoFiles...)
+ // TODO: Need to think about constants in test files. Maybe write type_string_test.go
+ // in a separate pass? For later.
+ // names = append(names, pkg.TestGoFiles...) // These are also in the "foo" package.
+ names = append(names, pkg.SFiles...)
+ names = prefixDirectory(directory, names)
+ g.parsePackage(directory, names, nil)
+}
+
+// parsePackageFiles parses the package occupying the named files.
+func (g *Generator) parsePackageFiles(names []string) {
+ g.parsePackage(".", names, nil)
+}
+
+// prefixDirectory places the directory name on the beginning of each name in the list.
+func prefixDirectory(directory string, names []string) []string {
+ if directory == "." {
+ return names
+ }
+ ret := make([]string, len(names))
+ for i, name := range names {
+ ret[i] = filepath.Join(directory, name)
+ }
+ return ret
+}
+
+// parsePackage analyzes the single package constructed from the named files.
+// If text is non-nil, it is a string to be used instead of the content of the file,
+// to be used for testing. parsePackage exits if there is an error.
+func (g *Generator) parsePackage(directory string, names []string, text interface{}) {
+ var files []*File
+ var astFiles []*ast.File
+ g.pkg = new(Package)
+ fs := token.NewFileSet()
+ for _, name := range names {
+ if !strings.HasSuffix(name, ".go") {
+ continue
+ }
+ parsedFile, err := parser.ParseFile(fs, name, text, 0)
+ if err != nil {
+ log.Fatalf("parsing package: %s: %s", name, err)
+ }
+ astFiles = append(astFiles, parsedFile)
+ files = append(files, &File{
+ file: parsedFile,
+ pkg: g.pkg,
+ })
+ }
+ if len(astFiles) == 0 {
+ log.Fatalf("%s: no buildable Go files", directory)
+ }
+ g.pkg.name = astFiles[0].Name.Name
+ g.pkg.files = files
+ g.pkg.dir = directory
+ // Type check the package.
+ g.pkg.check(fs, astFiles)
+}
+
+// check type-checks the package. The package must be OK to proceed.
+func (pkg *Package) check(fs *token.FileSet, astFiles []*ast.File) {
+ pkg.defs = make(map[*ast.Ident]types.Object)
+ var config types.Config
+ info := &types.Info{
+ Defs: pkg.defs,
+ }
+ typesPkg, err := config.Check(pkg.dir, fs, astFiles, info)
+ if err != nil {
+ log.Fatalf("checking package: %s", err)
+ }
+ pkg.typesPkg = typesPkg
+}
+
+// generate produces the String method for the named type.
+func (g *Generator) generate(typeName string) {
+ values := make([]Value, 0, 100)
+ for _, file := range g.pkg.files {
+ // Set the state for this run of the walker.
+ file.typeName = typeName
+ file.values = nil
+ if file.file != nil {
+ ast.Inspect(file.file, file.genDecl)
+ values = append(values, file.values...)
+ }
+ }
+
+ if len(values) == 0 {
+ log.Fatalf("no values defined for type %s", typeName)
+ }
+ runs := splitIntoRuns(values)
+ // The decision of which pattern to use depends on the number of
+ // runs in the numbers. If there's only one, it's easy. For more than
+ // one, there's a tradeoff between complexity and size of the data
+ // and code vs. the simplicity of a map. A map takes more space,
+ // but so does the code. The decision here (crossover at 10) is
+ // arbitrary, but considers that for large numbers of runs the cost
+ // of the linear scan in the switch might become important, and
+ // rather than use yet another algorithm such as binary search,
+ // we punt and use a map. In any case, the likelihood of a map
+ // being necessary for any realistic example other than bitmasks
+ // is very low. And bitmasks probably deserve their own analysis,
+ // to be done some other day.
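+ // In short: one run uses a single indexed range, up to ten runs use a
+ // switch over ranges, and anything sparser falls back to a map.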
+ switch {
+ case len(runs) == 1:
+ g.buildOneRun(runs, typeName)
+ case len(runs) <= 10:
+ g.buildMultipleRuns(runs, typeName)
+ default:
+ g.buildMap(runs, typeName)
+ }
+}
+
+// splitIntoRuns breaks the values into runs of contiguous sequences.
+// For example, given 1,2,3,5,6,7 it returns {1,2,3},{5,6,7}.
+// The input slice is known to be non-empty.
+func splitIntoRuns(values []Value) [][]Value {
+ // We use stable sort so the lexically first name is chosen for equal elements.
+ sort.Stable(byValue(values))
+ // Remove duplicates. Stable sort has put the one we want to print first,
+ // so use that one. The String method won't care about which named constant
+ // was the argument, so the first name for the given value is the only one to keep.
+ // We need to do this because identical values would cause the switch or map
+ // to fail to compile.
+ j := 1
+ for i := 1; i < len(values); i++ {
+ if values[i].value != values[i-1].value {
+ values[j] = values[i]
+ j++
+ }
+ }
+ values = values[:j]
+ runs := make([][]Value, 0, 10)
+ for len(values) > 0 {
+ // One contiguous sequence per outer loop.
+ i := 1
+ for i < len(values) && values[i].value == values[i-1].value+1 {
+ i++
+ }
+ runs = append(runs, values[:i])
+ values = values[i:]
+ }
+ return runs
+}
+
+// format returns the gofmt-ed contents of the Generator's buffer.
+func (g *Generator) format() []byte {
+ src, err := format.Source(g.buf.Bytes())
+ if err != nil {
+ // Should never happen, but can arise when developing this code.
+ // The user can compile the output to see the error.
+ log.Printf("warning: internal error: invalid Go generated: %s", err)
+ log.Printf("warning: compile the package to analyze the error")
+ return g.buf.Bytes()
+ }
+ return src
+}
+
+// Value represents a declared constant.
+type Value struct {
+ name string // The name of the constant.
+ // The value is stored as a bit pattern alone. The boolean tells us
+ // whether to interpret it as an int64 or a uint64; the only place
+ // this matters is when sorting.
+ // Much of the time the str field is all we need; it is printed
+ // by Value.String.
+ value uint64 // Will be converted to int64 when needed.
+ signed bool // Whether the constant is a signed type.
+ str string // The string representation given by the "go/exact" package.
+}
+
+func (v *Value) String() string {
+ return v.str
+}
+
+// byValue lets us sort the constants into increasing order.
+// We take care in the Less method to sort in signed or unsigned order,
+// as appropriate.
+type byValue []Value
+
+func (b byValue) Len() int { return len(b) }
+func (b byValue) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
+func (b byValue) Less(i, j int) bool {
+ if b[i].signed {
+ return int64(b[i].value) < int64(b[j].value)
+ }
+ return b[i].value < b[j].value
+}
+
+// genDecl processes one declaration clause.
+func (f *File) genDecl(node ast.Node) bool {
+ decl, ok := node.(*ast.GenDecl)
+ if !ok || decl.Tok != token.CONST {
+ // We only care about const declarations.
+ return true
+ }
+ // The name of the type of the constants we are declaring.
+ // Can change if this is a multi-element declaration.
+ typ := ""
+ // Loop over the elements of the declaration. Each element is a ValueSpec:
+ // a list of names possibly followed by a type, possibly followed by values.
+ // If the type and value are both missing, we carry down the type (and value,
+ // but the "go/types" package takes care of that).
+ for _, spec := range decl.Specs {
+ vspec := spec.(*ast.ValueSpec) // Guaranteed to succeed as this is CONST.
+ if vspec.Type == nil && len(vspec.Values) > 0 {
+ // "X = 1". With no type but a value, the constant is untyped.
+ // Skip this vspec and reset the remembered type.
+ typ = ""
+ continue
+ }
+ if vspec.Type != nil {
+ // "X T". We have a type. Remember it.
+ ident, ok := vspec.Type.(*ast.Ident)
+ if !ok {
+ continue
+ }
+ typ = ident.Name
+ }
+ if typ != f.typeName {
+ // This is not the type we're looking for.
+ continue
+ }
+ // We now have a list of names (from one line of source code) all being
+ // declared with the desired type.
+ // Grab their names and actual values and store them in f.values.
+ for _, name := range vspec.Names {
+ if name.Name == "_" {
+ continue
+ }
+ // This dance lets the type checker find the values for us. It's a
+ // bit tricky: look up the object declared by the name, find its
+ // types.Const, and extract its value.
+ obj, ok := f.pkg.defs[name]
+ if !ok {
+ log.Fatalf("no value for constant %s", name)
+ }
+ info := obj.Type().Underlying().(*types.Basic).Info()
+ if info&types.IsInteger == 0 {
+ log.Fatalf("can't handle non-integer constant type %s", typ)
+ }
+ value := obj.(*types.Const).Val() // Guaranteed to succeed as this is CONST.
+ if value.Kind() != exact.Int {
+ log.Fatalf("can't happen: constant is not an integer %s", name)
+ }
+ i64, isInt := exact.Int64Val(value)
+ u64, isUint := exact.Uint64Val(value)
+ if !isInt && !isUint {
+ log.Fatalf("internal error: value of %s is not an integer: %s", name, value.String())
+ }
+ if !isInt {
+ u64 = uint64(i64)
+ }
+ v := Value{
+ name: name.Name,
+ value: u64,
+ signed: info&types.IsUnsigned == 0,
+ str: value.String(),
+ }
+ f.values = append(f.values, v)
+ }
+ }
+ return false
+}
+
+// Helpers
+
+// usize returns the number of bits of the smallest unsigned integer
+// type that will hold n. Used to create the smallest possible slice of
+// integers to use as indexes into the concatenated strings.
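+// For example, usize(300) returns 16, since 300 overflows a uint8 but fits in a uint16.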
+func usize(n int) int {
+ switch {
+ case n < 1<<8:
+ return 8
+ case n < 1<<16:
+ return 16
+ default:
+ // 2^32 is enough constants for anyone.
+ return 32
+ }
+}
+
+// declareIndexAndNameVars declares the index slices and concatenated names
+// strings representing the runs of values.
+func (g *Generator) declareIndexAndNameVars(runs [][]Value, typeName string) {
+ var indexes, names []string
+ for i, run := range runs {
+ index, name := g.createIndexAndNameDecl(run, typeName, fmt.Sprintf("_%d", i))
+ indexes = append(indexes, index)
+ names = append(names, name)
+ }
+ g.Printf("const (\n")
+ for _, name := range names {
+ g.Printf("\t%s\n", name)
+ }
+ g.Printf(")\n\n")
+ g.Printf("var (")
+ for _, index := range indexes {
+ g.Printf("\t%s\n", index)
+ }
+ g.Printf(")\n\n")
+}
+
+// declareIndexAndNameVar is the single-run version of declareIndexAndNameVars
+func (g *Generator) declareIndexAndNameVar(run []Value, typeName string) {
+ index, name := g.createIndexAndNameDecl(run, typeName, "")
+ g.Printf("const %s\n", name)
+ g.Printf("var %s\n", index)
+}
+
+// createIndexAndNameDecl returns the pair of declarations for the run. The caller will add "const" and "var".
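+// For example, the run One, Two, Three for type Number yields the index declaration
+// _Number_index = [...]uint8{3, 6, 11} and the name constant _Number_name = "OneTwoThree".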
+func (g *Generator) createIndexAndNameDecl(run []Value, typeName string, suffix string) (string, string) {
+ b := new(bytes.Buffer)
+ indexes := make([]int, len(run))
+ for i := range run {
+ b.WriteString(run[i].name)
+ indexes[i] = b.Len()
+ }
+ nameConst := fmt.Sprintf("_%s_name%s = %q", typeName, suffix, b.String())
+ nameLen := b.Len()
+ b.Reset()
+ fmt.Fprintf(b, "_%s_index%s = [...]uint%d{", typeName, suffix, usize(nameLen))
+ for i, v := range indexes {
+ if i > 0 {
+ fmt.Fprintf(b, ", ")
+ }
+ fmt.Fprintf(b, "%d", v)
+ }
+ fmt.Fprintf(b, "}")
+ return b.String(), nameConst
+}
+
+// declareNameVars declares the concatenated names string representing all the values in the runs.
+func (g *Generator) declareNameVars(runs [][]Value, typeName string, suffix string) {
+ g.Printf("const _%s_name%s = \"", typeName, suffix)
+ for _, run := range runs {
+ for i := range run {
+ g.Printf("%s", run[i].name)
+ }
+ }
+ g.Printf("\"\n")
+}
+
+// buildOneRun generates the variables and String method for a single run of contiguous values.
+func (g *Generator) buildOneRun(runs [][]Value, typeName string) {
+ values := runs[0]
+ g.Printf("\n")
+ g.declareIndexAndNameVar(values, typeName)
+ // The generated code is simple enough to write as a Printf format.
+ lessThanZero := ""
+ if values[0].signed {
+ lessThanZero = "i < 0 || "
+ }
+ if values[0].value == 0 { // Signed or unsigned, 0 is still 0.
+ g.Printf(stringOneRun, typeName, usize(len(values)), lessThanZero)
+ } else {
+ g.Printf(stringOneRunWithOffset, typeName, values[0].String(), usize(len(values)), lessThanZero)
+ }
+}
+
+// Arguments to format are:
+// [1]: type name
+// [2]: size of index element (8 for uint8 etc.)
+// [3]: less than zero check (for signed types)
+const stringOneRun = `func (i %[1]s) String() string {
+ if %[3]si >= %[1]s(len(_%[1]s_index)) {
+ return fmt.Sprintf("%[1]s(%%d)", i)
+ }
+ hi := _%[1]s_index[i]
+ lo := uint%[2]d(0)
+ if i > 0 {
+ lo = _%[1]s_index[i-1]
+ }
+ return _%[1]s_name[lo:hi]
+}
+`
+
+// Arguments to format are:
+// [1]: type name
+// [2]: lowest defined value for type, as a string
+// [3]: size of index element (8 for uint8 etc.)
+// [4]: less than zero check (for signed types)
+/*
+ */
+const stringOneRunWithOffset = `func (i %[1]s) String() string {
+ i -= %[2]s
+ if %[4]si >= %[1]s(len(_%[1]s_index)) {
+ return fmt.Sprintf("%[1]s(%%d)", i + %[2]s)
+ }
+ hi := _%[1]s_index[i]
+ lo := uint%[3]d(0)
+ if i > 0 {
+ lo = _%[1]s_index[i-1]
+ }
+ return _%[1]s_name[lo : hi]
+}
+`
+
+// buildMultipleRuns generates the variables and String method for multiple runs of contiguous values.
+// For this pattern, a single Printf format won't do.
+func (g *Generator) buildMultipleRuns(runs [][]Value, typeName string) {
+ g.Printf("\n")
+ g.declareIndexAndNameVars(runs, typeName)
+ g.Printf("func (i %s) String() string {\n", typeName)
+ g.Printf("\tswitch {\n")
+ for i, values := range runs {
+ if len(values) == 1 {
+ g.Printf("\tcase i == %s:\n", &values[0])
+ g.Printf("\t\treturn _%s_name_%d\n", typeName, i)
+ continue
+ }
+ g.Printf("\tcase %s <= i && i <= %s:\n", &values[0], &values[len(values)-1])
+ if values[0].value != 0 {
+ g.Printf("\t\ti -= %s\n", &values[0])
+ }
+ g.Printf("\t\tlo := uint%d(0)\n", usize(len(values)))
+ g.Printf("\t\tif i > 0 {\n")
+ g.Printf("\t\t\tlo = _%s_index_%d[i-1]\n", typeName, i)
+ g.Printf("\t\t}\n")
+ g.Printf("\t\treturn _%s_name_%d[lo:_%s_index_%d[i]]\n", typeName, i, typeName, i)
+ }
+ g.Printf("\tdefault:\n")
+ g.Printf("\t\treturn fmt.Sprintf(\"%s(%%d)\", i)\n", typeName)
+ g.Printf("\t}\n")
+ g.Printf("}\n")
+}
+
+// buildMap handles the case where the space is so sparse a map is a reasonable fallback.
+// It's a rare situation but has simple code.
+func (g *Generator) buildMap(runs [][]Value, typeName string) {
+ g.Printf("\n")
+ g.declareNameVars(runs, typeName, "")
+ g.Printf("\nvar _%s_map = map[%s]string{\n", typeName, typeName)
+ n := 0
+ for _, values := range runs {
+ for _, value := range values {
+ g.Printf("\t%s: _%s_name[%d:%d],\n", &value, typeName, n, n+len(value.name))
+ n += len(value.name)
+ }
+ }
+ g.Printf("}\n\n")
+ g.Printf(stringMap, typeName)
+}
+
+// Argument to format is the type name.
+const stringMap = `func (i %[1]s) String() string {
+ if str, ok := _%[1]s_map[i]; ok {
+ return str
+ }
+ return fmt.Sprintf("%[1]s(%%d)", i)
+}
+`
diff --git a/llgo/third_party/go.tools/cmd/stringer/testdata/day.go b/llgo/third_party/go.tools/cmd/stringer/testdata/day.go
new file mode 100644
index 0000000000000000000000000000000000000000..35fa8dce8adfdc048eabe53550edf38962e9ce39
--- /dev/null
+++ b/llgo/third_party/go.tools/cmd/stringer/testdata/day.go
@@ -0,0 +1,39 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Simple test: enumeration of type int starting at 0.
+
+package main
+
+import "fmt"
+
+type Day int
+
+const (
+ Monday Day = iota
+ Tuesday
+ Wednesday
+ Thursday
+ Friday
+ Saturday
+ Sunday
+)
+
+func main() {
+ ck(Monday, "Monday")
+ ck(Tuesday, "Tuesday")
+ ck(Wednesday, "Wednesday")
+ ck(Thursday, "Thursday")
+ ck(Friday, "Friday")
+ ck(Saturday, "Saturday")
+ ck(Sunday, "Sunday")
+ ck(-127, "Day(-127)")
+ ck(127, "Day(127)")
+}
+
+func ck(day Day, str string) {
+ if fmt.Sprint(day) != str {
+ panic("day.go: " + str)
+ }
+}
diff --git a/llgo/third_party/go.tools/cmd/stringer/testdata/gap.go b/llgo/third_party/go.tools/cmd/stringer/testdata/gap.go
new file mode 100644
index 0000000000000000000000000000000000000000..bc8a90c54735d2e6ad27a8b4cab76ea24f738c43
--- /dev/null
+++ b/llgo/third_party/go.tools/cmd/stringer/testdata/gap.go
@@ -0,0 +1,44 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Gaps and an offset.
+
+package main
+
+import "fmt"
+
+type Gap int
+
+const (
+ Two Gap = 2
+ Three Gap = 3
+ Five Gap = 5
+ Six Gap = 6
+ Seven Gap = 7
+ Eight Gap = 8
+ Nine Gap = 9
+ Eleven Gap = 11
+)
+
+func main() {
+ ck(0, "Gap(0)")
+ ck(1, "Gap(1)")
+ ck(Two, "Two")
+ ck(Three, "Three")
+ ck(4, "Gap(4)")
+ ck(Five, "Five")
+ ck(Six, "Six")
+ ck(Seven, "Seven")
+ ck(Eight, "Eight")
+ ck(Nine, "Nine")
+ ck(10, "Gap(10)")
+ ck(Eleven, "Eleven")
+ ck(12, "Gap(12)")
+}
+
+func ck(gap Gap, str string) {
+ if fmt.Sprint(gap) != str {
+ panic("gap.go: " + str)
+ }
+}
diff --git a/llgo/third_party/go.tools/cmd/stringer/testdata/num.go b/llgo/third_party/go.tools/cmd/stringer/testdata/num.go
new file mode 100644
index 0000000000000000000000000000000000000000..0d5ab1070766357972d8f524a3390c4ac1957087
--- /dev/null
+++ b/llgo/third_party/go.tools/cmd/stringer/testdata/num.go
@@ -0,0 +1,35 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Signed integers spanning zero.
+
+package main
+
+import "fmt"
+
+type Num int
+
+const (
+ m_2 Num = -2 + iota
+ m_1
+ m0
+ m1
+ m2
+)
+
+func main() {
+ ck(-3, "Num(-3)")
+ ck(m_2, "m_2")
+ ck(m_1, "m_1")
+ ck(m0, "m0")
+ ck(m1, "m1")
+ ck(m2, "m2")
+ ck(3, "Num(3)")
+}
+
+func ck(num Num, str string) {
+ if fmt.Sprint(num) != str {
+ panic("num.go: " + str)
+ }
+}
diff --git a/llgo/third_party/go.tools/cmd/stringer/testdata/number.go b/llgo/third_party/go.tools/cmd/stringer/testdata/number.go
new file mode 100644
index 0000000000000000000000000000000000000000..7f1c8246c038d4fc76d46f68b76f6d6a1bbe8e3c
--- /dev/null
+++ b/llgo/third_party/go.tools/cmd/stringer/testdata/number.go
@@ -0,0 +1,34 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Enumeration with an offset.
+// Also includes a duplicate.
+
+package main
+
+import "fmt"
+
+type Number int
+
+const (
+ _ Number = iota
+ One
+ Two
+ Three
+ AnotherOne = One // Duplicate; note that AnotherOne doesn't appear below.
+)
+
+func main() {
+ ck(One, "One")
+ ck(Two, "Two")
+ ck(Three, "Three")
+ ck(AnotherOne, "One")
+ ck(127, "Number(127)")
+}
+
+func ck(num Number, str string) {
+ if fmt.Sprint(num) != str {
+ panic("number.go: " + str)
+ }
+}
diff --git a/llgo/third_party/go.tools/cmd/stringer/testdata/prime.go b/llgo/third_party/go.tools/cmd/stringer/testdata/prime.go
new file mode 100644
index 0000000000000000000000000000000000000000..f551a1a0bbf9d15777b6569593240bb5234ec74e
--- /dev/null
+++ b/llgo/third_party/go.tools/cmd/stringer/testdata/prime.go
@@ -0,0 +1,56 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Enough gaps to trigger a map implementation of the method.
+// Also includes a duplicate to test that it doesn't cause problems.
+
+package main
+
+import "fmt"
+
+type Prime int
+
+const (
+ p2 Prime = 2
+ p3 Prime = 3
+ p5 Prime = 5
+ p7 Prime = 7
+ p77 Prime = 7 // Duplicate; note that p77 doesn't appear below.
+ p11 Prime = 11
+ p13 Prime = 13
+ p17 Prime = 17
+ p19 Prime = 19
+ p23 Prime = 23
+ p29 Prime = 29
+ p37 Prime = 31
+ p41 Prime = 41
+ p43 Prime = 43
+)
+
+func main() {
+ ck(0, "Prime(0)")
+ ck(1, "Prime(1)")
+ ck(p2, "p2")
+ ck(p3, "p3")
+ ck(4, "Prime(4)")
+ ck(p5, "p5")
+ ck(p7, "p7")
+ ck(p77, "p7")
+ ck(p11, "p11")
+ ck(p13, "p13")
+ ck(p17, "p17")
+ ck(p19, "p19")
+ ck(p23, "p23")
+ ck(p29, "p29")
+ ck(p37, "p37")
+ ck(p41, "p41")
+ ck(p43, "p43")
+ ck(44, "Prime(44)")
+}
+
+func ck(prime Prime, str string) {
+ if fmt.Sprint(prime) != str {
+ panic("prime.go: " + str)
+ }
+}
diff --git a/llgo/third_party/go.tools/cmd/stringer/testdata/unum.go b/llgo/third_party/go.tools/cmd/stringer/testdata/unum.go
new file mode 100644
index 0000000000000000000000000000000000000000..2f8508f49cd69212d4291b6f278ac6946259eae3
--- /dev/null
+++ b/llgo/third_party/go.tools/cmd/stringer/testdata/unum.go
@@ -0,0 +1,38 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Unsigned integers spanning zero.
+
+package main
+
+import "fmt"
+
+type Unum uint8
+
+const (
+ m_2 Unum = iota + 253
+ m_1
+)
+
+const (
+ m0 Unum = iota
+ m1
+ m2
+)
+
+func main() {
+ ck(^Unum(0)-3, "Unum(252)")
+ ck(m_2, "m_2")
+ ck(m_1, "m_1")
+ ck(m0, "m0")
+ ck(m1, "m1")
+ ck(m2, "m2")
+ ck(3, "Unum(3)")
+}
+
+func ck(unum Unum, str string) {
+ if fmt.Sprint(unum) != str {
+ panic("unum.go: " + str)
+ }
+}
diff --git a/llgo/third_party/go.tools/cmd/stringer/util_test.go b/llgo/third_party/go.tools/cmd/stringer/util_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..1aeba6e681d10d843d38a79babb2cee412431a04
--- /dev/null
+++ b/llgo/third_party/go.tools/cmd/stringer/util_test.go
@@ -0,0 +1,77 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains tests for some of the internal functions.
+
+package main
+
+import (
+ "fmt"
+ "testing"
+)
+
+// Helpers to save typing in the test cases.
+type u []uint64
+type uu [][]uint64
+
+type SplitTest struct {
+ input u
+ output uu
+ signed bool
+}
+
+var (
+ m2 = uint64(2)
+ m1 = uint64(1)
+ m0 = uint64(0)
+ m_1 = ^uint64(0) // -1 when signed.
+ m_2 = ^uint64(0) - 1 // -2 when signed.
+)
+
+var splitTests = []SplitTest{
+ // No need for a test for the empty case; that's picked off before splitIntoRuns.
+ // Single value.
+ {u{1}, uu{u{1}}, false},
+ // Out of order.
+ {u{3, 2, 1}, uu{u{1, 2, 3}}, true},
+ // Out of order.
+ {u{3, 2, 1}, uu{u{1, 2, 3}}, false},
+ // A gap at the beginning.
+ {u{1, 33, 32, 31}, uu{u{1}, u{31, 32, 33}}, true},
+ // A gap in the middle, in mixed order.
+ {u{33, 7, 32, 31, 9, 8}, uu{u{7, 8, 9}, u{31, 32, 33}}, true},
+ // Gaps throughout
+ {u{33, 44, 1, 32, 45, 31}, uu{u{1}, u{31, 32, 33}, u{44, 45}}, true},
+ // Unsigned values spanning 0.
+ {u{m1, m0, m_1, m2, m_2}, uu{u{m0, m1, m2}, u{m_2, m_1}}, false},
+ // Signed values spanning 0
+ {u{m1, m0, m_1, m2, m_2}, uu{u{m_2, m_1, m0, m1, m2}}, true},
+}
+
+func TestSplitIntoRuns(t *testing.T) {
+Outer:
+ for n, test := range splitTests {
+ values := make([]Value, len(test.input))
+ for i, v := range test.input {
+ values[i] = Value{"", v, test.signed, fmt.Sprint(v)}
+ }
+ runs := splitIntoRuns(values)
+ if len(runs) != len(test.output) {
+ t.Errorf("#%d: %v: got %d runs; expected %d", n, test.input, len(runs), len(test.output))
+ continue
+ }
+ for i, run := range runs {
+ if len(run) != len(test.output[i]) {
+ t.Errorf("#%d: got %v; expected %v", n, runs, test.output)
+ continue Outer
+ }
+ for j, v := range run {
+ if v.value != test.output[i][j] {
+ t.Errorf("#%d: got %v; expected %v", n, runs, test.output)
+ continue Outer
+ }
+ }
+ }
+ }
+}
diff --git a/llgo/third_party/go.tools/cmd/vet/asmdecl.go b/llgo/third_party/go.tools/cmd/vet/asmdecl.go
new file mode 100644
index 0000000000000000000000000000000000000000..954ffbd949b20961321e0149ddf1511a15109170
--- /dev/null
+++ b/llgo/third_party/go.tools/cmd/vet/asmdecl.go
@@ -0,0 +1,650 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Identify mismatches between assembly files and Go func declarations.
+
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "go/ast"
+ "go/token"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+// 'kind' is a kind of assembly variable.
+// The kinds 1, 2, 4, 8 stand for values of that size.
+type asmKind int
+
+// These special kinds are not valid sizes.
+const (
+ asmString asmKind = 100 + iota
+ asmSlice
+ asmInterface
+ asmEmptyInterface
+)
+
+// An asmArch describes assembly parameters for an architecture
+type asmArch struct {
+ name string
+ ptrSize int
+ intSize int
+ maxAlign int
+ bigEndian bool
+ stack string
+ lr bool
+}
+
+// An asmFunc describes the expected variables for a function on a given architecture.
+type asmFunc struct {
+ arch *asmArch
+ size int // size of all arguments
+ vars map[string]*asmVar
+ varByOffset map[int]*asmVar
+}
+
+// An asmVar describes a single assembly variable.
+type asmVar struct {
+ name string
+ kind asmKind
+ typ string
+ off int
+ size int
+ inner []*asmVar
+}
+
+var (
+ asmArch386 = asmArch{"386", 4, 4, 4, false, "SP", false}
+ asmArchArm = asmArch{"arm", 4, 4, 4, false, "R13", true}
+ asmArchAmd64 = asmArch{"amd64", 8, 8, 8, false, "SP", false}
+ asmArchAmd64p32 = asmArch{"amd64p32", 4, 4, 8, false, "SP", false}
+ asmArchPower64 = asmArch{"power64", 8, 8, 8, true, "R1", true}
+ asmArchPower64LE = asmArch{"power64le", 8, 8, 8, false, "R1", true}
+
+ arches = []*asmArch{
+ &asmArch386,
+ &asmArchArm,
+ &asmArchAmd64,
+ &asmArchAmd64p32,
+ &asmArchPower64,
+ &asmArchPower64LE,
+ }
+)
+
+var (
+ re = regexp.MustCompile
+ asmPlusBuild = re(`//\s+\+build\s+([^\n]+)`)
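+ // asmTEXT matches a TEXT directive such as TEXT ·Name(SB),NOSPLIT,$16-24,
+ // capturing the Go-visible name, flags, frame size, and argument size.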
+ asmTEXT = re(`\bTEXT\b.*·([^\(]+)\(SB\)(?:\s*,\s*([0-9A-Z|+]+))?(?:\s*,\s*\$(-?[0-9]+)(?:-([0-9]+))?)?`)
+ asmDATA = re(`\b(DATA|GLOBL)\b`)
+ asmNamedFP = re(`([a-zA-Z0-9_\xFF-\x{10FFFF}]+)(?:\+([0-9]+))\(FP\)`)
+ asmUnnamedFP = re(`[^+\-0-9]](([0-9]+)\(FP\))`)
+ asmSP = re(`[^+\-0-9](([0-9]+)\(([A-Z0-9]+)\))`)
+ asmOpcode = re(`^\s*(?:[A-Z0-9a-z_]+:)?\s*([A-Z]+)\s*([^,]*)(?:,\s*(.*))?`)
+ power64Suff = re(`([BHWD])(ZU|Z|U|BR)?$`)
+)
+
+func asmCheck(pkg *Package) {
+ if !vet("asmdecl") {
+ return
+ }
+
+ // No work if no assembly files.
+ if !pkg.hasFileWithSuffix(".s") {
+ return
+ }
+
+ // Gather declarations. knownFunc[name][arch] is func description.
+ knownFunc := make(map[string]map[string]*asmFunc)
+
+ for _, f := range pkg.files {
+ if f.file != nil {
+ for _, decl := range f.file.Decls {
+ if decl, ok := decl.(*ast.FuncDecl); ok && decl.Body == nil {
+ knownFunc[decl.Name.Name] = f.asmParseDecl(decl)
+ }
+ }
+ }
+ }
+
+ for _, f := range pkg.files {
+ if !strings.HasSuffix(f.name, ".s") {
+ continue
+ }
+ Println("Checking file", f.name)
+
+ // Determine architecture from file name if possible.
+ var arch string
+ var archDef *asmArch
+ for _, a := range arches {
+ if strings.HasSuffix(f.name, "_"+a.name+".s") {
+ arch = a.name
+ archDef = a
+ break
+ }
+ }
+
+ lines := strings.SplitAfter(string(f.content), "\n")
+ var (
+ fn *asmFunc
+ fnName string
+ localSize, argSize int
+ wroteSP bool
+ haveRetArg bool
+ retLine []int
+ )
+
+ flushRet := func() {
+ if fn != nil && fn.vars["ret"] != nil && !haveRetArg && len(retLine) > 0 {
+ v := fn.vars["ret"]
+ for _, line := range retLine {
+ f.Badf(token.NoPos, "%s:%d: [%s] %s: RET without writing to %d-byte ret+%d(FP)", f.name, line, arch, fnName, v.size, v.off)
+ }
+ }
+ retLine = nil
+ }
+ for lineno, line := range lines {
+ lineno++
+
+ badf := func(format string, args ...interface{}) {
+ f.Badf(token.NoPos, "%s:%d: [%s] %s: %s", f.name, lineno, arch, fnName, fmt.Sprintf(format, args...))
+ }
+
+ if arch == "" {
+ // Determine architecture from +build line if possible.
+ if m := asmPlusBuild.FindStringSubmatch(line); m != nil {
+ Fields:
+ for _, fld := range strings.Fields(m[1]) {
+ for _, a := range arches {
+ if a.name == fld {
+ arch = a.name
+ archDef = a
+ break Fields
+ }
+ }
+ }
+ }
+ }
+
+ if m := asmTEXT.FindStringSubmatch(line); m != nil {
+ flushRet()
+ if arch == "" {
+ f.Warnf(token.NoPos, "%s: cannot determine architecture for assembly file", f.name)
+ return
+ }
+ fnName = m[1]
+ fn = knownFunc[m[1]][arch]
+ if fn != nil {
+ size, _ := strconv.Atoi(m[4])
+ if size != fn.size && (m[2] != "7" && !strings.Contains(m[2], "NOSPLIT") || size != 0) {
+ badf("wrong argument size %d; expected $...-%d", size, fn.size)
+ }
+ }
+ localSize, _ = strconv.Atoi(m[3])
+ localSize += archDef.intSize
+ if archDef.lr {
+ // Account for caller's saved LR
+ localSize += archDef.intSize
+ }
+ argSize, _ = strconv.Atoi(m[4])
+ wroteSP = false
+ haveRetArg = false
+ continue
+ } else if strings.Contains(line, "TEXT") && strings.Contains(line, "SB") {
+ // function, but not visible from Go (didn't match asmTEXT), so stop checking
+ flushRet()
+ fn = nil
+ fnName = ""
+ continue
+ }
+
+ if strings.Contains(line, "RET") {
+ retLine = append(retLine, lineno)
+ }
+
+ if fnName == "" {
+ continue
+ }
+
+ if asmDATA.FindStringSubmatch(line) != nil {
+ fn = nil
+ }
+
+ if archDef == nil {
+ continue
+ }
+
+ if strings.Contains(line, ", "+archDef.stack) || strings.Contains(line, ",\t"+archDef.stack) {
+ wroteSP = true
+ continue
+ }
+
+ for _, m := range asmSP.FindAllStringSubmatch(line, -1) {
+ if m[3] != archDef.stack || wroteSP {
+ continue
+ }
+ off := 0
+ if m[1] != "" {
+ off, _ = strconv.Atoi(m[2])
+ }
+ if off >= localSize {
+ if fn != nil {
+ v := fn.varByOffset[off-localSize]
+ if v != nil {
+ badf("%s should be %s+%d(FP)", m[1], v.name, off-localSize)
+ continue
+ }
+ }
+ if off >= localSize+argSize {
+ badf("use of %s points beyond argument frame", m[1])
+ continue
+ }
+ badf("use of %s to access argument frame", m[1])
+ }
+ }
+
+ if fn == nil {
+ continue
+ }
+
+ for _, m := range asmUnnamedFP.FindAllStringSubmatch(line, -1) {
+ badf("use of unnamed argument %s", m[1])
+ }
+
+ for _, m := range asmNamedFP.FindAllStringSubmatch(line, -1) {
+ name := m[1]
+ off := 0
+ if m[2] != "" {
+ off, _ = strconv.Atoi(m[2])
+ }
+ if name == "ret" || strings.HasPrefix(name, "ret_") {
+ haveRetArg = true
+ }
+ v := fn.vars[name]
+ if v == nil {
+ // Allow argframe+0(FP).
+ if name == "argframe" && off == 0 {
+ continue
+ }
+ v = fn.varByOffset[off]
+ if v != nil {
+ badf("unknown variable %s; offset %d is %s+%d(FP)", name, off, v.name, v.off)
+ } else {
+ badf("unknown variable %s", name)
+ }
+ continue
+ }
+ asmCheckVar(badf, fn, line, m[0], off, v)
+ }
+ }
+ flushRet()
+ }
+}
+
+// asmParseDecl parses a function decl for expected assembly variables.
+func (f *File) asmParseDecl(decl *ast.FuncDecl) map[string]*asmFunc {
+ var (
+ arch *asmArch
+ fn *asmFunc
+ offset int
+ failed bool
+ )
+
+ addVar := func(outer string, v asmVar) {
+ if vo := fn.vars[outer]; vo != nil {
+ vo.inner = append(vo.inner, &v)
+ }
+ fn.vars[v.name] = &v
+ for i := 0; i < v.size; i++ {
+ fn.varByOffset[v.off+i] = &v
+ }
+ }
+
+ addParams := func(list []*ast.Field) {
+ for i, fld := range list {
+ // Determine alignment, size, and kind of type in declaration.
+ var align, size int
+ var kind asmKind
+ names := fld.Names
+ typ := f.gofmt(fld.Type)
+ switch t := fld.Type.(type) {
+ default:
+ switch typ {
+ default:
+ f.Warnf(fld.Type.Pos(), "unknown assembly argument type %s", typ)
+ failed = true
+ return
+ case "int8", "uint8", "byte", "bool":
+ size = 1
+ case "int16", "uint16":
+ size = 2
+ case "int32", "uint32", "float32":
+ size = 4
+ case "int64", "uint64", "float64":
+ align = arch.maxAlign
+ size = 8
+ case "int", "uint":
+ size = arch.intSize
+ case "uintptr", "iword", "Word", "Errno", "unsafe.Pointer":
+ size = arch.ptrSize
+ case "string", "ErrorString":
+ size = arch.ptrSize * 2
+ align = arch.ptrSize
+ kind = asmString
+ }
+ case *ast.ChanType, *ast.FuncType, *ast.MapType, *ast.StarExpr:
+ size = arch.ptrSize
+ case *ast.InterfaceType:
+ align = arch.ptrSize
+ size = 2 * arch.ptrSize
+ if len(t.Methods.List) > 0 {
+ kind = asmInterface
+ } else {
+ kind = asmEmptyInterface
+ }
+ case *ast.ArrayType:
+ if t.Len == nil {
+ size = arch.ptrSize + 2*arch.intSize
+ align = arch.ptrSize
+ kind = asmSlice
+ break
+ }
+ f.Warnf(fld.Type.Pos(), "unsupported assembly argument type %s", typ)
+ failed = true
+ case *ast.StructType:
+ f.Warnf(fld.Type.Pos(), "unsupported assembly argument type %s", typ)
+ failed = true
+ }
+ if align == 0 {
+ align = size
+ }
+ if kind == 0 {
+ kind = asmKind(size)
+ }
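+ // Round offset up to a multiple of align; -offset & (align-1) is the padding needed.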
+ offset += -offset & (align - 1)
+
+ // Create variable for each name being declared with this type.
+ if len(names) == 0 {
+ name := "unnamed"
+ if decl.Type.Results != nil && len(decl.Type.Results.List) > 0 && &list[0] == &decl.Type.Results.List[0] && i == 0 {
+ // Assume assembly will refer to single unnamed result as r.
+ name = "ret"
+ }
+ names = []*ast.Ident{{Name: name}}
+ }
+ for _, id := range names {
+ name := id.Name
+ addVar("", asmVar{
+ name: name,
+ kind: kind,
+ typ: typ,
+ off: offset,
+ size: size,
+ })
+ switch kind {
+ case 8:
+ if arch.ptrSize == 4 {
+ w1, w2 := "lo", "hi"
+ if arch.bigEndian {
+ w1, w2 = w2, w1
+ }
+ addVar(name, asmVar{
+ name: name + "_" + w1,
+ kind: 4,
+ typ: "half " + typ,
+ off: offset,
+ size: 4,
+ })
+ addVar(name, asmVar{
+ name: name + "_" + w2,
+ kind: 4,
+ typ: "half " + typ,
+ off: offset + 4,
+ size: 4,
+ })
+ }
+
+ case asmEmptyInterface:
+ addVar(name, asmVar{
+ name: name + "_type",
+ kind: asmKind(arch.ptrSize),
+ typ: "interface type",
+ off: offset,
+ size: arch.ptrSize,
+ })
+ addVar(name, asmVar{
+ name: name + "_data",
+ kind: asmKind(arch.ptrSize),
+ typ: "interface data",
+ off: offset + arch.ptrSize,
+ size: arch.ptrSize,
+ })
+
+ case asmInterface:
+ addVar(name, asmVar{
+ name: name + "_itable",
+ kind: asmKind(arch.ptrSize),
+ typ: "interface itable",
+ off: offset,
+ size: arch.ptrSize,
+ })
+ addVar(name, asmVar{
+ name: name + "_data",
+ kind: asmKind(arch.ptrSize),
+ typ: "interface data",
+ off: offset + arch.ptrSize,
+ size: arch.ptrSize,
+ })
+
+ case asmSlice:
+ addVar(name, asmVar{
+ name: name + "_base",
+ kind: asmKind(arch.ptrSize),
+ typ: "slice base",
+ off: offset,
+ size: arch.ptrSize,
+ })
+ addVar(name, asmVar{
+ name: name + "_len",
+ kind: asmKind(arch.intSize),
+ typ: "slice len",
+ off: offset + arch.ptrSize,
+ size: arch.intSize,
+ })
+ addVar(name, asmVar{
+ name: name + "_cap",
+ kind: asmKind(arch.intSize),
+ typ: "slice cap",
+ off: offset + arch.ptrSize + arch.intSize,
+ size: arch.intSize,
+ })
+
+ case asmString:
+ addVar(name, asmVar{
+ name: name + "_base",
+ kind: asmKind(arch.ptrSize),
+ typ: "string base",
+ off: offset,
+ size: arch.ptrSize,
+ })
+ addVar(name, asmVar{
+ name: name + "_len",
+ kind: asmKind(arch.intSize),
+ typ: "string len",
+ off: offset + arch.ptrSize,
+ size: arch.intSize,
+ })
+ }
+ offset += size
+ }
+ }
+ }
+
+ m := make(map[string]*asmFunc)
+ for _, arch = range arches {
+ fn = &asmFunc{
+ arch: arch,
+ vars: make(map[string]*asmVar),
+ varByOffset: make(map[int]*asmVar),
+ }
+ offset = 0
+ addParams(decl.Type.Params.List)
+ if decl.Type.Results != nil && len(decl.Type.Results.List) > 0 {
+ offset += -offset & (arch.maxAlign - 1)
+ addParams(decl.Type.Results.List)
+ }
+ fn.size = offset
+ m[arch.name] = fn
+ }
+
+ if failed {
+ return nil
+ }
+ return m
+}
+
+// asmCheckVar checks a single variable reference.
+func asmCheckVar(badf func(string, ...interface{}), fn *asmFunc, line, expr string, off int, v *asmVar) {
+ m := asmOpcode.FindStringSubmatch(line)
+ if m == nil {
+ if !strings.HasPrefix(strings.TrimSpace(line), "//") {
+ badf("cannot find assembly opcode")
+ }
+ return
+ }
+
+ // Determine operand sizes from instruction.
+ // Typically the suffix suffices, but there are exceptions.
+ var src, dst, kind asmKind
+ op := m[1]
+ switch fn.arch.name + "." + op {
+ case "386.FMOVLP":
+ src, dst = 8, 4
+ case "arm.MOVD":
+ src = 8
+ case "arm.MOVW":
+ src = 4
+ case "arm.MOVH", "arm.MOVHU":
+ src = 2
+ case "arm.MOVB", "arm.MOVBU":
+ src = 1
+ // LEA* opcodes don't really read the second arg.
+ // They just take the address of it.
+ case "386.LEAL":
+ dst = 4
+ case "amd64.LEAQ":
+ dst = 8
+ case "amd64p32.LEAL":
+ dst = 4
+ default:
+ switch fn.arch.name {
+ case "386", "amd64":
+ if strings.HasPrefix(op, "F") && (strings.HasSuffix(op, "D") || strings.HasSuffix(op, "DP")) {
+ // FMOVDP, FXCHD, etc
+ src = 8
+ break
+ }
+ if strings.HasPrefix(op, "F") && (strings.HasSuffix(op, "F") || strings.HasSuffix(op, "FP")) {
+ // FMOVFP, FXCHF, etc
+ src = 4
+ break
+ }
+ if strings.HasSuffix(op, "SD") {
+ // MOVSD, SQRTSD, etc
+ src = 8
+ break
+ }
+ if strings.HasSuffix(op, "SS") {
+ // MOVSS, SQRTSS, etc
+ src = 4
+ break
+ }
+ if strings.HasPrefix(op, "SET") {
+ // SETEQ, etc
+ src = 1
+ break
+ }
+ switch op[len(op)-1] {
+ case 'B':
+ src = 1
+ case 'W':
+ src = 2
+ case 'L':
+ src = 4
+ case 'D', 'Q':
+ src = 8
+ }
+ case "power64", "power64le":
+ // Strip standard suffixes to reveal size letter.
+ m := power64Suff.FindStringSubmatch(op)
+ if m != nil {
+ switch m[1][0] {
+ case 'B':
+ src = 1
+ case 'H':
+ src = 2
+ case 'W':
+ src = 4
+ case 'D':
+ src = 8
+ }
+ }
+ }
+ }
+ if dst == 0 {
+ dst = src
+ }
+
+ // Determine whether the match we're holding
+ // is the first or second argument.
+ if strings.Index(line, expr) > strings.Index(line, ",") {
+ kind = dst
+ } else {
+ kind = src
+ }
+
+ vk := v.kind
+ vt := v.typ
+ switch vk {
+ case asmInterface, asmEmptyInterface, asmString, asmSlice:
+ // allow reference to first word (pointer)
+ vk = v.inner[0].kind
+ vt = v.inner[0].typ
+ }
+
+ if off != v.off {
+ var inner bytes.Buffer
+ for i, vi := range v.inner {
+ if len(v.inner) > 1 {
+ fmt.Fprintf(&inner, ",")
+ }
+ fmt.Fprintf(&inner, " ")
+ if i == len(v.inner)-1 {
+ fmt.Fprintf(&inner, "or ")
+ }
+ fmt.Fprintf(&inner, "%s+%d(FP)", vi.name, vi.off)
+ }
+ badf("invalid offset %s; expected %s+%d(FP)%s", expr, v.name, v.off, inner.String())
+ return
+ }
+ if kind != 0 && kind != vk {
+ var inner bytes.Buffer
+ if len(v.inner) > 0 {
+ fmt.Fprintf(&inner, " containing")
+ for i, vi := range v.inner {
+ if i > 0 && len(v.inner) > 2 {
+ fmt.Fprintf(&inner, ",")
+ }
+ fmt.Fprintf(&inner, " ")
+ if i > 0 && i == len(v.inner)-1 {
+ fmt.Fprintf(&inner, "and ")
+ }
+ fmt.Fprintf(&inner, "%s+%d(FP)", vi.name, vi.off)
+ }
+ }
+ badf("invalid %s of %s; %s is %d-byte value%s", op, expr, vt, vk, inner.String())
+ }
+}
diff --git a/llgo/third_party/go.tools/cmd/vet/assign.go b/llgo/third_party/go.tools/cmd/vet/assign.go
new file mode 100644
index 0000000000000000000000000000000000000000..54c1ae1fdc3f9142699a7a36bceb7bbed135b904
--- /dev/null
+++ b/llgo/third_party/go.tools/cmd/vet/assign.go
@@ -0,0 +1,49 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+This file contains the code to check for useless assignments.
+*/
+
+package main
+
+import (
+ "go/ast"
+ "go/token"
+ "reflect"
+)
+
+func init() {
+ register("assign",
+ "check for useless assignments",
+ checkAssignStmt,
+ assignStmt)
+}
+
+// TODO: should also check for assignments to struct fields inside methods
+// that are on T instead of *T.
+
+// checkAssignStmt checks for assignments of the form "x = x".
+// These are almost always useless, and even when they aren't they are usually a mistake.
+func checkAssignStmt(f *File, node ast.Node) {
+ stmt := node.(*ast.AssignStmt)
+ if stmt.Tok != token.ASSIGN {
+ return // ignore :=
+ }
+ if len(stmt.Lhs) != len(stmt.Rhs) {
+ // If LHS and RHS have different cardinality, they can't be the same.
+ return
+ }
+ for i, lhs := range stmt.Lhs {
+ rhs := stmt.Rhs[i]
+ if reflect.TypeOf(lhs) != reflect.TypeOf(rhs) {
+ continue // short-circuit the heavy-weight gofmt check
+ }
+ le := f.gofmt(lhs)
+ re := f.gofmt(rhs)
+ if le == re {
+ f.Badf(stmt.Pos(), "self-assignment of %s to %s", re, le)
+ }
+ }
+}
diff --git a/llgo/third_party/go.tools/cmd/vet/atomic.go b/llgo/third_party/go.tools/cmd/vet/atomic.go
new file mode 100644
index 0000000000000000000000000000000000000000..c084f13ab3fcdfafcd0cf427312dd1e32194f61a
--- /dev/null
+++ b/llgo/third_party/go.tools/cmd/vet/atomic.go
@@ -0,0 +1,66 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "go/ast"
+ "go/token"
+)
+
+func init() {
+ register("atomic",
+ "check for common mistaken usages of the sync/atomic package",
+ checkAtomicAssignment,
+ assignStmt)
+}
+
+// checkAtomicAssignment walks the assignment statement checking for common
+// mistaken usage of atomic package, such as: x = atomic.AddUint64(&x, 1)
+func checkAtomicAssignment(f *File, node ast.Node) {
+ n := node.(*ast.AssignStmt)
+ if len(n.Lhs) != len(n.Rhs) {
+ return
+ }
+
+ for i, right := range n.Rhs {
+ call, ok := right.(*ast.CallExpr)
+ if !ok {
+ continue
+ }
+ sel, ok := call.Fun.(*ast.SelectorExpr)
+ if !ok {
+ continue
+ }
+ pkg, ok := sel.X.(*ast.Ident)
+ if !ok || pkg.Name != "atomic" {
+ continue
+ }
+
+ switch sel.Sel.Name {
+ case "AddInt32", "AddInt64", "AddUint32", "AddUint64", "AddUintptr":
+ f.checkAtomicAddAssignment(n.Lhs[i], call)
+ }
+ }
+}
+
+// checkAtomicAddAssignment walks the atomic.Add* method calls checking for assigning the return value
+// to the same variable being used in the operation
+func (f *File) checkAtomicAddAssignment(left ast.Expr, call *ast.CallExpr) {
+ if len(call.Args) != 2 {
+ return
+ }
+ arg := call.Args[0]
+ broken := false
+
+ if uarg, ok := arg.(*ast.UnaryExpr); ok && uarg.Op == token.AND {
+ broken = f.gofmt(left) == f.gofmt(uarg.X)
+ } else if star, ok := left.(*ast.StarExpr); ok {
+ broken = f.gofmt(star.X) == f.gofmt(arg)
+ }
+
+ if broken {
+ f.Bad(left.Pos(), "direct assignment to atomic value")
+ }
+}
diff --git a/llgo/third_party/go.tools/cmd/vet/bool.go b/llgo/third_party/go.tools/cmd/vet/bool.go
new file mode 100644
index 0000000000000000000000000000000000000000..e28c258801d345186c5043a105f8f6899a4b956c
--- /dev/null
+++ b/llgo/third_party/go.tools/cmd/vet/bool.go
@@ -0,0 +1,185 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains boolean condition tests.
+
+package main
+
+import (
+ "go/ast"
+ "go/token"
+)
+
+func init() {
+ register("bool",
+ "check for mistakes involving boolean operators",
+ checkBool,
+ binaryExpr)
+}
+
+func checkBool(f *File, n ast.Node) {
+ e := n.(*ast.BinaryExpr)
+
+ var op boolOp
+ switch e.Op {
+ case token.LOR:
+ op = or
+ case token.LAND:
+ op = and
+ default:
+ return
+ }
+
+ comm := op.commutativeSets(e)
+ for _, exprs := range comm {
+ op.checkRedundant(f, exprs)
+ op.checkSuspect(f, exprs)
+ }
+}
+
+type boolOp struct {
+ name string
+ tok token.Token // token corresponding to this operator
+ badEq token.Token // token corresponding to the equality test that should not be used with this operator
+}
+
+var (
+ or = boolOp{"or", token.LOR, token.NEQ}
+ and = boolOp{"and", token.LAND, token.EQL}
+)
+
+// commutativeSets returns all side effect free sets of
+// expressions in e that are connected by op.
+// For example, given 'a || b || f() || c || d' with the or op,
+// commutativeSets returns {{b, a}, {d, c}}.
+func (op boolOp) commutativeSets(e *ast.BinaryExpr) [][]ast.Expr {
+ exprs := op.split(e)
+
+ // Partition the slice of expressions into commutative sets.
+ i := 0
+ var sets [][]ast.Expr
+ for j := 0; j <= len(exprs); j++ {
+ if j == len(exprs) || hasSideEffects(exprs[j]) {
+ if i < j {
+ sets = append(sets, exprs[i:j])
+ }
+ i = j + 1
+ }
+ }
+
+ return sets
+}
+
+// checkRedundant checks for expressions of the form
+// e && e
+// e || e
+// Exprs must contain only side effect free expressions.
+func (op boolOp) checkRedundant(f *File, exprs []ast.Expr) {
+ seen := make(map[string]bool)
+ for _, e := range exprs {
+ efmt := f.gofmt(e)
+ if seen[efmt] {
+ f.Badf(e.Pos(), "redundant %s: %s %s %s", op.name, efmt, op.tok, efmt)
+ } else {
+ seen[efmt] = true
+ }
+ }
+}
+
+// checkSuspect checks for expressions of the form
+// x != c1 || x != c2
+// x == c1 && x == c2
+// where c1 and c2 are constant expressions.
+// If c1 and c2 are the same then it's redundant;
+// if c1 and c2 are different then it's always true or always false.
+// Exprs must contain only side effect free expressions.
+func (op boolOp) checkSuspect(f *File, exprs []ast.Expr) {
+ // seen maps from expressions 'x' to equality expressions 'x != c'.
+ seen := make(map[string]string)
+
+ for _, e := range exprs {
+ bin, ok := e.(*ast.BinaryExpr)
+ if !ok || bin.Op != op.badEq {
+ continue
+ }
+
+ // In order to avoid false positives, restrict to cases
+ // in which one of the operands is constant. We're then
+ // interested in the other operand.
+ // In the rare case in which both operands are constant
+ // (e.g. runtime.GOOS and "windows"), we'll only catch
+ // mistakes if the LHS is repeated, which is how most
+ // code is written.
+ var x ast.Expr
+ switch {
+ case f.pkg.types[bin.Y].Value != nil:
+ x = bin.X
+ case f.pkg.types[bin.X].Value != nil:
+ x = bin.Y
+ default:
+ continue
+ }
+
+ // e is of the form 'x != c' or 'x == c'.
+ xfmt := f.gofmt(x)
+ efmt := f.gofmt(e)
+ if prev, found := seen[xfmt]; found {
+ // checkRedundant handles the case in which efmt == prev.
+ if efmt != prev {
+ f.Badf(e.Pos(), "suspect %s: %s %s %s", op.name, efmt, op.tok, prev)
+ }
+ } else {
+ seen[xfmt] = efmt
+ }
+ }
+}
+
+// hasSideEffects reports whether evaluation of e has side effects.
+func hasSideEffects(e ast.Expr) bool {
+ safe := true
+ ast.Inspect(e, func(node ast.Node) bool {
+ switch n := node.(type) {
+ // Using CallExpr here will catch conversions
+ // as well as function and method invocations.
+ // We'll live with the false negatives for now.
+ case *ast.CallExpr:
+ safe = false
+ return false
+ case *ast.UnaryExpr:
+ if n.Op == token.ARROW {
+ safe = false
+ return false
+ }
+ }
+ return true
+ })
+ return !safe
+}
+
+// split returns a slice of all subexpressions in e that are connected by op.
+// For example, given 'a || (b || c) || d' with the or op,
+// split returns []{d, c, b, a}.
+func (op boolOp) split(e ast.Expr) (exprs []ast.Expr) {
+ for {
+ e = unparen(e)
+ if b, ok := e.(*ast.BinaryExpr); ok && b.Op == op.tok {
+ exprs = append(exprs, op.split(b.Y)...)
+ e = b.X
+ } else {
+ exprs = append(exprs, e)
+ break
+ }
+ }
+ return
+}
+
+func unparen(e ast.Expr) ast.Expr {
+ for {
+ p, ok := e.(*ast.ParenExpr)
+ if !ok {
+ return e
+ }
+ e = p.X
+ }
+}
diff --git a/llgo/third_party/go.tools/cmd/vet/buildtag.go b/llgo/third_party/go.tools/cmd/vet/buildtag.go
new file mode 100644
index 0000000000000000000000000000000000000000..2d86edf734f378858a8b4c25c748f46059187502
--- /dev/null
+++ b/llgo/third_party/go.tools/cmd/vet/buildtag.go
@@ -0,0 +1,91 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "strings"
+ "unicode"
+)
+
+var (
+ nl = []byte("\n")
+ slashSlash = []byte("//")
+ plusBuild = []byte("+build")
+)
+
+// checkBuildTag checks that build tags are in the correct location and well-formed.
+func checkBuildTag(name string, data []byte) {
+ if !vet("buildtags") {
+ return
+ }
+ lines := bytes.SplitAfter(data, nl)
+
+ // Determine cutpoint where +build comments are no longer valid.
+ // They are valid in leading // comments in the file followed by
+ // a blank line.
+ var cutoff int
+ for i, line := range lines {
+ line = bytes.TrimSpace(line)
+ if len(line) == 0 {
+ cutoff = i
+ continue
+ }
+ if bytes.HasPrefix(line, slashSlash) {
+ continue
+ }
+ break
+ }
+
+ for i, line := range lines {
+ line = bytes.TrimSpace(line)
+ if !bytes.HasPrefix(line, slashSlash) {
+ continue
+ }
+ text := bytes.TrimSpace(line[2:])
+ if bytes.HasPrefix(text, plusBuild) {
+ fields := bytes.Fields(text)
+ if !bytes.Equal(fields[0], plusBuild) {
+ // Comment is something like +buildasdf not +build.
+ fmt.Fprintf(os.Stderr, "%s:%d: possible malformed +build comment\n", name, i+1)
+ continue
+ }
+ if i >= cutoff {
+ fmt.Fprintf(os.Stderr, "%s:%d: +build comment must appear before package clause and be followed by a blank line\n", name, i+1)
+ setExit(1)
+ continue
+ }
+ // Check arguments.
+ Args:
+ for _, arg := range fields[1:] {
+ for _, elem := range strings.Split(string(arg), ",") {
+ if strings.HasPrefix(elem, "!!") {
+ fmt.Fprintf(os.Stderr, "%s:%d: invalid double negative in build constraint: %s\n", name, i+1, arg)
+ setExit(1)
+ break Args
+ }
+ if strings.HasPrefix(elem, "!") {
+ elem = elem[1:]
+ }
+ for _, c := range elem {
+ if !unicode.IsLetter(c) && !unicode.IsDigit(c) && c != '_' && c != '.' {
+ fmt.Fprintf(os.Stderr, "%s:%d: invalid non-alphanumeric build constraint: %s\n", name, i+1, arg)
+ setExit(1)
+ break Args
+ }
+ }
+ }
+ }
+ continue
+ }
+ // Comment with +build but not at beginning.
+ if bytes.Contains(line, plusBuild) && i < cutoff {
+ fmt.Fprintf(os.Stderr, "%s:%d: possible malformed +build comment\n", name, i+1)
+ continue
+ }
+ }
+}
diff --git a/llgo/third_party/go.tools/cmd/vet/composite.go b/llgo/third_party/go.tools/cmd/vet/composite.go
new file mode 100644
index 0000000000000000000000000000000000000000..6fad1028917dc6b2dc99b51533f4e53a9ad731cd
--- /dev/null
+++ b/llgo/third_party/go.tools/cmd/vet/composite.go
@@ -0,0 +1,125 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains the test for unkeyed struct literals.
+
+package main
+
+import (
+ "flag"
+ "go/ast"
+ "strings"
+
+ "llvm.org/llgo/third_party/go.tools/cmd/vet/whitelist"
+)
+
+var compositeWhiteList = flag.Bool("compositewhitelist", true, "use composite white list; for testing only")
+
+func init() {
+ register("composites",
+ "check that composite literals used field-keyed elements",
+ checkUnkeyedLiteral,
+ compositeLit)
+}
+
+// checkUnkeyedLiteral checks if a composite literal is a struct literal with
+// unkeyed fields.
+func checkUnkeyedLiteral(f *File, node ast.Node) {
+ c := node.(*ast.CompositeLit)
+ typ := c.Type
+ for {
+ if typ1, ok := c.Type.(*ast.ParenExpr); ok {
+ typ = typ1
+ continue
+ }
+ break
+ }
+
+ switch typ.(type) {
+ case *ast.ArrayType:
+ return
+ case *ast.MapType:
+ return
+ case *ast.StructType:
+ return // a literal struct type does not need to use keys
+ case *ast.Ident:
+ // A simple type name like t or T does not need keys either,
+ // since it is almost certainly declared in the current package.
+ // (The exception is names being used via import . "pkg", but
+ // those are already breaking the Go 1 compatibility promise,
+ // so not reporting potential additional breakage seems okay.)
+ return
+ }
+
+ // Otherwise the type is a selector like pkg.Name.
+ // We only care if pkg.Name is a struct, not if it's a map, array, or slice.
+ isStruct, typeString := f.pkg.isStruct(c)
+ if !isStruct {
+ return
+ }
+
+ if typeString == "" { // isStruct doesn't know
+ typeString = f.gofmt(typ)
+ }
+
+ // It's a struct, or we can't tell it's not a struct because we don't have types.
+
+ // Check if the CompositeLit contains an unkeyed field.
+ allKeyValue := true
+ for _, e := range c.Elts {
+ if _, ok := e.(*ast.KeyValueExpr); !ok {
+ allKeyValue = false
+ break
+ }
+ }
+ if allKeyValue {
+ return
+ }
+
+ // Check that the CompositeLit's type has the form pkg.Typ.
+ s, ok := c.Type.(*ast.SelectorExpr)
+ if !ok {
+ return
+ }
+ pkg, ok := s.X.(*ast.Ident)
+ if !ok {
+ return
+ }
+
+ // Convert the package name to an import path, and compare to a whitelist.
+ path := pkgPath(f, pkg.Name)
+ if path == "" {
+ f.Badf(c.Pos(), "unresolvable package for %s.%s literal", pkg.Name, s.Sel.Name)
+ return
+ }
+ typeName := path + "." + s.Sel.Name
+ if *compositeWhiteList && whitelist.UnkeyedLiteral[typeName] {
+ return
+ }
+
+ f.Bad(c.Pos(), typeString+" composite literal uses unkeyed fields")
+}
+
+// pkgPath returns the import path "image/png" for the package name "png".
+//
+// This is based purely on syntax and convention, and not on the imported
+// package's contents. It will be incorrect if a package name differs from the
+// leaf element of the import path, or if the package was a dot import.
+func pkgPath(f *File, pkgName string) (path string) {
+ for _, x := range f.file.Imports {
+ s := strings.Trim(x.Path.Value, `"`)
+ if x.Name != nil {
+ // Catch `import pkgName "foo/bar"`.
+ if x.Name.Name == pkgName {
+ return s
+ }
+ } else {
+ // Catch `import "pkgName"` or `import "foo/bar/pkgName"`.
+ if s == pkgName || strings.HasSuffix(s, "/"+pkgName) {
+ return s
+ }
+ }
+ }
+ return ""
+}
diff --git a/llgo/third_party/go.tools/cmd/vet/copylock.go b/llgo/third_party/go.tools/cmd/vet/copylock.go
new file mode 100644
index 0000000000000000000000000000000000000000..ea0d2a67fe636fc887ee7c342aa2e2a6c0c8ad85
--- /dev/null
+++ b/llgo/third_party/go.tools/cmd/vet/copylock.go
@@ -0,0 +1,155 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains the code to check that locks are not passed by value.
+
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "go/ast"
+ "go/token"
+
+ "llvm.org/llgo/third_party/go.tools/go/types"
+)
+
+func init() {
+ register("copylocks",
+ "check that locks are not passed by value",
+ checkCopyLocks,
+ funcDecl, rangeStmt)
+}
+
+// checkCopyLocks checks whether node might
+// inadvertently copy a lock.
+func checkCopyLocks(f *File, node ast.Node) {
+ switch node := node.(type) {
+ case *ast.RangeStmt:
+ checkCopyLocksRange(f, node)
+ case *ast.FuncDecl:
+ checkCopyLocksFunc(f, node)
+ }
+}
+
+// checkCopyLocksFunc checks whether a function might
+// inadvertently copy a lock, by checking whether
+// its receiver, parameters, or return values
+// are locks.
+func checkCopyLocksFunc(f *File, d *ast.FuncDecl) {
+ if d.Recv != nil && len(d.Recv.List) > 0 {
+ expr := d.Recv.List[0].Type
+ if path := lockPath(f.pkg.typesPkg, f.pkg.types[expr].Type); path != nil {
+ f.Badf(expr.Pos(), "%s passes Lock by value: %v", d.Name.Name, path)
+ }
+ }
+
+ if d.Type.Params != nil {
+ for _, field := range d.Type.Params.List {
+ expr := field.Type
+ if path := lockPath(f.pkg.typesPkg, f.pkg.types[expr].Type); path != nil {
+ f.Badf(expr.Pos(), "%s passes Lock by value: %v", d.Name.Name, path)
+ }
+ }
+ }
+
+ if d.Type.Results != nil {
+ for _, field := range d.Type.Results.List {
+ expr := field.Type
+ if path := lockPath(f.pkg.typesPkg, f.pkg.types[expr].Type); path != nil {
+ f.Badf(expr.Pos(), "%s returns Lock by value: %v", d.Name.Name, path)
+ }
+ }
+ }
+}
+
+// checkCopyLocksRange checks whether a range statement
+// might inadvertently copy a lock by checking whether
+// any of the range variables are locks.
+func checkCopyLocksRange(f *File, r *ast.RangeStmt) {
+ checkCopyLocksRangeVar(f, r.Tok, r.Key)
+ checkCopyLocksRangeVar(f, r.Tok, r.Value)
+}
+
+func checkCopyLocksRangeVar(f *File, rtok token.Token, e ast.Expr) {
+ if e == nil {
+ return
+ }
+ id, isId := e.(*ast.Ident)
+ if isId && id.Name == "_" {
+ return
+ }
+
+ var typ types.Type
+ if rtok == token.DEFINE {
+ if !isId {
+ return
+ }
+ obj := f.pkg.defs[id]
+ if obj == nil {
+ return
+ }
+ typ = obj.Type()
+ } else {
+ typ = f.pkg.types[e].Type
+ }
+
+ if typ == nil {
+ return
+ }
+ if path := lockPath(f.pkg.typesPkg, typ); path != nil {
+ f.Badf(e.Pos(), "range var %s copies Lock: %v", f.gofmt(e), path)
+ }
+}
+
+type typePath []types.Type
+
+// String pretty-prints a typePath.
+func (path typePath) String() string {
+ n := len(path)
+ var buf bytes.Buffer
+ for i := range path {
+ if i > 0 {
+ fmt.Fprint(&buf, " contains ")
+ }
+ // The human-readable path is in reverse order, outermost to innermost.
+ fmt.Fprint(&buf, path[n-i-1].String())
+ }
+ return buf.String()
+}
+
+// lockPath returns a typePath describing the location of a lock value
+// contained in typ. If there is no contained lock, it returns nil.
+func lockPath(tpkg *types.Package, typ types.Type) typePath {
+ if typ == nil {
+ return nil
+ }
+
+ // We're only interested in the case in which the underlying
+ // type is a struct. (Interfaces and pointers are safe to copy.)
+ styp, ok := typ.Underlying().(*types.Struct)
+ if !ok {
+ return nil
+ }
+
+ // We're looking for cases in which a reference to this type
+ // can be locked, but a value cannot. This differentiates
+ // embedded interfaces from embedded values.
+ if plock := types.NewMethodSet(types.NewPointer(typ)).Lookup(tpkg, "Lock"); plock != nil {
+ if lock := types.NewMethodSet(typ).Lookup(tpkg, "Lock"); lock == nil {
+ return []types.Type{typ}
+ }
+ }
+
+ nfields := styp.NumFields()
+ for i := 0; i < nfields; i++ {
+ ftyp := styp.Field(i).Type()
+ subpath := lockPath(tpkg, ftyp)
+ if subpath != nil {
+ return append(subpath, typ)
+ }
+ }
+
+ return nil
+}
diff --git a/llgo/third_party/go.tools/cmd/vet/deadcode.go b/llgo/third_party/go.tools/cmd/vet/deadcode.go
new file mode 100644
index 0000000000000000000000000000000000000000..3b306c21045e417a3f5645f763ca5db9f3170ec6
--- /dev/null
+++ b/llgo/third_party/go.tools/cmd/vet/deadcode.go
@@ -0,0 +1,296 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Check for syntactically unreachable code.
+
+package main
+
+import (
+ "go/ast"
+ "go/token"
+)
+
+func init() {
+ register("unreachable",
+ "check for unreachable code",
+ checkUnreachable,
+ funcDecl, funcLit)
+}
+
+type deadState struct {
+ f *File
+ hasBreak map[ast.Stmt]bool
+ hasGoto map[string]bool
+ labels map[string]ast.Stmt
+ breakTarget ast.Stmt
+
+ reachable bool
+}
+
+// checkUnreachable checks a function body for dead code.
+func checkUnreachable(f *File, node ast.Node) {
+ var body *ast.BlockStmt
+ switch n := node.(type) {
+ case *ast.FuncDecl:
+ body = n.Body
+ case *ast.FuncLit:
+ body = n.Body
+ }
+ if body == nil {
+ return
+ }
+
+ d := &deadState{
+ f: f,
+ hasBreak: make(map[ast.Stmt]bool),
+ hasGoto: make(map[string]bool),
+ labels: make(map[string]ast.Stmt),
+ }
+
+ d.findLabels(body)
+
+ d.reachable = true
+ d.findDead(body)
+}
+
+// findLabels gathers information about the labels defined and used by stmt
+// and about which statements break, whether a label is involved or not.
+func (d *deadState) findLabels(stmt ast.Stmt) {
+ switch x := stmt.(type) {
+ default:
+ d.f.Warnf(x.Pos(), "internal error in findLabels: unexpected statement %T", x)
+
+ case *ast.AssignStmt,
+ *ast.BadStmt,
+ *ast.DeclStmt,
+ *ast.DeferStmt,
+ *ast.EmptyStmt,
+ *ast.ExprStmt,
+ *ast.GoStmt,
+ *ast.IncDecStmt,
+ *ast.ReturnStmt,
+ *ast.SendStmt:
+ // no statements inside
+
+ case *ast.BlockStmt:
+ for _, stmt := range x.List {
+ d.findLabels(stmt)
+ }
+
+ case *ast.BranchStmt:
+ switch x.Tok {
+ case token.GOTO:
+ if x.Label != nil {
+ d.hasGoto[x.Label.Name] = true
+ }
+
+ case token.BREAK:
+ stmt := d.breakTarget
+ if x.Label != nil {
+ stmt = d.labels[x.Label.Name]
+ }
+ if stmt != nil {
+ d.hasBreak[stmt] = true
+ }
+ }
+
+ case *ast.IfStmt:
+ d.findLabels(x.Body)
+ if x.Else != nil {
+ d.findLabels(x.Else)
+ }
+
+ case *ast.LabeledStmt:
+ d.labels[x.Label.Name] = x.Stmt
+ d.findLabels(x.Stmt)
+
+ // These cases are all the same, but the x.Body only works
+ // when the specific type of x is known, so the cases cannot
+ // be merged.
+ case *ast.ForStmt:
+ outer := d.breakTarget
+ d.breakTarget = x
+ d.findLabels(x.Body)
+ d.breakTarget = outer
+
+ case *ast.RangeStmt:
+ outer := d.breakTarget
+ d.breakTarget = x
+ d.findLabels(x.Body)
+ d.breakTarget = outer
+
+ case *ast.SelectStmt:
+ outer := d.breakTarget
+ d.breakTarget = x
+ d.findLabels(x.Body)
+ d.breakTarget = outer
+
+ case *ast.SwitchStmt:
+ outer := d.breakTarget
+ d.breakTarget = x
+ d.findLabels(x.Body)
+ d.breakTarget = outer
+
+ case *ast.TypeSwitchStmt:
+ outer := d.breakTarget
+ d.breakTarget = x
+ d.findLabels(x.Body)
+ d.breakTarget = outer
+
+ case *ast.CommClause:
+ for _, stmt := range x.Body {
+ d.findLabels(stmt)
+ }
+
+ case *ast.CaseClause:
+ for _, stmt := range x.Body {
+ d.findLabels(stmt)
+ }
+ }
+}
+
+// findDead walks the statement looking for dead code.
+// If d.reachable is false on entry, stmt itself is dead.
+// When findDead returns, d.reachable tells whether the
+// statement following stmt is reachable.
+func (d *deadState) findDead(stmt ast.Stmt) {
+ // Is this a labeled goto target?
+ // If so, assume it is reachable due to the goto.
+ // This is slightly conservative, in that we don't
+ // check that the goto is reachable, so
+ // L: goto L
+ // will not provoke a warning.
+ // But it's good enough.
+ if x, isLabel := stmt.(*ast.LabeledStmt); isLabel && d.hasGoto[x.Label.Name] {
+ d.reachable = true
+ }
+
+ if !d.reachable {
+ switch stmt.(type) {
+ case *ast.EmptyStmt:
+ // do not warn about unreachable empty statements
+ default:
+ d.f.Bad(stmt.Pos(), "unreachable code")
+ d.reachable = true // silence error about next statement
+ }
+ }
+
+ switch x := stmt.(type) {
+ default:
+ d.f.Warnf(x.Pos(), "internal error in findDead: unexpected statement %T", x)
+
+ case *ast.AssignStmt,
+ *ast.BadStmt,
+ *ast.DeclStmt,
+ *ast.DeferStmt,
+ *ast.EmptyStmt,
+ *ast.GoStmt,
+ *ast.IncDecStmt,
+ *ast.SendStmt:
+ // no control flow
+
+ case *ast.BlockStmt:
+ for _, stmt := range x.List {
+ d.findDead(stmt)
+ }
+
+ case *ast.BranchStmt:
+ switch x.Tok {
+ case token.BREAK, token.GOTO, token.FALLTHROUGH:
+ d.reachable = false
+ case token.CONTINUE:
+ // NOTE: We accept "continue" statements as terminating.
+ // They are not necessary in the spec definition of terminating,
+ // because a continue statement cannot be the final statement
+ // before a return. But for the more general problem of syntactically
+ // identifying dead code, continue redirects control flow just
+ // like the other terminating statements.
+ d.reachable = false
+ }
+
+ case *ast.ExprStmt:
+ // Call to panic?
+ call, ok := x.X.(*ast.CallExpr)
+ if ok {
+ name, ok := call.Fun.(*ast.Ident)
+ if ok && name.Name == "panic" && name.Obj == nil {
+ d.reachable = false
+ }
+ }
+
+ case *ast.ForStmt:
+ d.findDead(x.Body)
+ d.reachable = x.Cond != nil || d.hasBreak[x]
+
+ case *ast.IfStmt:
+ d.findDead(x.Body)
+ if x.Else != nil {
+ r := d.reachable
+ d.reachable = true
+ d.findDead(x.Else)
+ d.reachable = d.reachable || r
+ } else {
+ // might not have executed if statement
+ d.reachable = true
+ }
+
+ case *ast.LabeledStmt:
+ d.findDead(x.Stmt)
+
+ case *ast.RangeStmt:
+ d.findDead(x.Body)
+ d.reachable = true
+
+ case *ast.ReturnStmt:
+ d.reachable = false
+
+ case *ast.SelectStmt:
+ // NOTE: Unlike switch and type switch below, we don't care
+ // whether a select has a default, because a select without a
+ // default blocks until one of the cases can run. That's different
+ // from a switch without a default, which behaves like it has
+ // a default with an empty body.
+ anyReachable := false
+ for _, comm := range x.Body.List {
+ d.reachable = true
+ for _, stmt := range comm.(*ast.CommClause).Body {
+ d.findDead(stmt)
+ }
+ anyReachable = anyReachable || d.reachable
+ }
+ d.reachable = anyReachable || d.hasBreak[x]
+
+ case *ast.SwitchStmt:
+ anyReachable := false
+ hasDefault := false
+ for _, cas := range x.Body.List {
+ cc := cas.(*ast.CaseClause)
+ if cc.List == nil {
+ hasDefault = true
+ }
+ d.reachable = true
+ for _, stmt := range cc.Body {
+ d.findDead(stmt)
+ }
+ anyReachable = anyReachable || d.reachable
+ }
+ d.reachable = anyReachable || d.hasBreak[x] || !hasDefault
+
+ case *ast.TypeSwitchStmt:
+ anyReachable := false
+ hasDefault := false
+ for _, cas := range x.Body.List {
+ cc := cas.(*ast.CaseClause)
+ if cc.List == nil {
+ hasDefault = true
+ }
+ d.reachable = true
+ for _, stmt := range cc.Body {
+ d.findDead(stmt)
+ }
+ anyReachable = anyReachable || d.reachable
+ }
+ d.reachable = anyReachable || d.hasBreak[x] || !hasDefault
+ }
+}
diff --git a/llgo/third_party/go.tools/cmd/vet/doc.go b/llgo/third_party/go.tools/cmd/vet/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..e90a8b87d5e450c6ee8b84703c4254b1aad2ff12
--- /dev/null
+++ b/llgo/third_party/go.tools/cmd/vet/doc.go
@@ -0,0 +1,182 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+
+Vet examines Go source code and reports suspicious constructs, such as Printf
+calls whose arguments do not align with the format string. Vet uses heuristics
+that do not guarantee all reports are genuine problems, but it can find errors
+not caught by the compilers.
+
+It can be invoked three ways:
+
+By package, from the go tool:
+ go vet package/path/name
+vets the package whose path is provided.
+
+By files:
+ go tool vet source/directory/*.go
+vets the files named, all of which must be in the same package.
+
+By directory:
+ go tool vet source/directory
+recursively descends the directory, vetting each file in isolation.
+Package-level type-checking is disabled, so the vetting is weaker.
+
+Vet's exit code is 2 for erroneous invocation of the tool, 1 if a
+problem was reported, and 0 otherwise. Note that the tool does not
+check every possible problem and depends on unreliable heuristics
+so it should be used as guidance only, not as a firm indicator of
+program correctness.
+
+By default all checks are performed. If any flags are explicitly set
+to true, only those tests are run. Conversely, if any flag is
+explicitly set to false, only those tests are disabled.
+Thus -printf=true runs the printf check, -printf=false runs all checks
+except the printf check.
+
+Available checks:
+
+Printf family
+
+Flag: -printf
+
+Suspicious calls to functions in the Printf family, including any functions
+with these names, disregarding case:
+ Print Printf Println
+ Fprint Fprintf Fprintln
+ Sprint Sprintf Sprintln
+ Error Errorf
+ Fatal Fatalf
+ Log Logf
+ Panic Panicf Panicln
+If the function name ends with an 'f', the function is assumed to take
+a format descriptor string in the manner of fmt.Printf. If not, vet
+complains about arguments that look like format descriptor strings.
+
+It also checks for errors such as using a Writer as the first argument of
+Printf.
+
+Methods
+
+Flag: -methods
+
+Non-standard signatures for methods with familiar names, including:
+ Format GobEncode GobDecode MarshalJSON MarshalXML
+ Peek ReadByte ReadFrom ReadRune Scan Seek
+ UnmarshalJSON UnreadByte UnreadRune WriteByte
+ WriteTo
+
+Struct tags
+
+Flag: -structtags
+
+Struct tags that do not follow the format understood by reflect.StructTag.Get.
+Well-known encoding struct tags (json, xml) used with unexported fields.
+
+Unkeyed composite literals
+
+Flag: -composites
+
+Composite struct literals that do not use the field-keyed syntax.
+
+Assembly declarations
+
+Flag: -asmdecl
+
+Mismatches between assembly files and Go function declarations.
+
+Useless assignments
+
+Flag: -assign
+
+Check for useless assignments.
+
+Atomic mistakes
+
+Flag: -atomic
+
+Common mistaken usages of the sync/atomic package.
+
+Boolean conditions
+
+Flag: -bool
+
+Mistakes involving boolean operators.
+
+Build tags
+
+Flag: -buildtags
+
+Badly formed or misplaced +build tags.
+
+Copying locks
+
+Flag: -copylocks
+
+Locks that are erroneously passed by value.
+
+Nil function comparison
+
+Flag: -nilfunc
+
+Comparisons between functions and nil.
+
+Range loop variables
+
+Flag: -rangeloops
+
+Incorrect uses of range loop variables in closures.
+
+Unreachable code
+
+Flag: -unreachable
+
+Unreachable code.
+
+Shadowed variables
+
+Flag: -shadow=false (experimental; must be set explicitly)
+
+Variables that may have been unintentionally shadowed.
+
+Misuse of unsafe Pointers
+
+Flag: -unsafeptr
+
+Likely incorrect uses of unsafe.Pointer to convert integers to pointers.
+A conversion from uintptr to unsafe.Pointer is invalid if it implies that
+there is a uintptr-typed word in memory that holds a pointer value,
+because that word will be invisible to stack copying and to the garbage
+collector.
+
+Shifts
+
+Flag: -shift
+
+Shifts equal to or longer than the variable's length.
+
+Other flags
+
+These flags configure the behavior of vet:
+
+ -all (default true)
+ Check everything; disabled if any explicit check is requested.
+ -v
+ Verbose mode
+ -printfuncs
+ A comma-separated list of print-like functions to supplement
+ the standard list. Each entry is in the form Name:N where N
+ is the zero-based argument position of the first argument
+ involved in the print: either the format or the first print
+ argument for non-formatted prints. For example,
+ if you have Warn and Warnf functions that take an
+ io.Writer as their first argument, like Fprintf,
+ -printfuncs=Warn:1,Warnf:1
+ -shadowstrict
+ Whether to be strict about shadowing; can be noisy.
+ -test
+ For testing only: sets -all and -shadow.
+*/
+package main
diff --git a/llgo/third_party/go.tools/cmd/vet/main.go b/llgo/third_party/go.tools/cmd/vet/main.go
new file mode 100644
index 0000000000000000000000000000000000000000..63832e57cc9e3fbb0c32b8e8a6f8359fec094452
--- /dev/null
+++ b/llgo/third_party/go.tools/cmd/vet/main.go
@@ -0,0 +1,493 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Vet is a simple checker for static errors in Go source code.
+// See doc.go for more information.
+package main
+
+import (
+ "bytes"
+ "flag"
+ "fmt"
+ "go/ast"
+ "go/build"
+ "go/parser"
+ "go/printer"
+ "go/token"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+
+ _ "llvm.org/llgo/third_party/go.tools/go/gcimporter"
+ "llvm.org/llgo/third_party/go.tools/go/types"
+)
+
+// TODO: Need a flag to set build tags when parsing the package.
+
+var verbose = flag.Bool("v", false, "verbose")
+var testFlag = flag.Bool("test", false, "for testing only: sets -all and -shadow")
+var exitCode = 0
+
+// "all" is here only for the appearance of backwards compatibility.
+// It has no effect; the triState flags do the work.
+var all = flag.Bool("all", true, "check everything; disabled if any explicit check is requested")
+
+// Flags to control which individual checks to perform.
+var report = map[string]*triState{
+ // Only unusual checks are written here.
+ // Most checks that operate during the AST walk are added by register.
+ "asmdecl": triStateFlag("asmdecl", unset, "check assembly against Go declarations"),
+ "buildtags": triStateFlag("buildtags", unset, "check that +build tags are valid"),
+}
+
+// experimental records the flags enabling experimental features. These must be
+// requested explicitly; they are not enabled by -all.
+var experimental = map[string]bool{}
+
+ // setTrueCount records how many flags are explicitly set to true.
+var setTrueCount int
+
+// A triState is a boolean that knows whether it has been set to either true or false.
+// It is used to identify if a flag appears; the standard boolean flag cannot
+// distinguish missing from unset. It also satisfies flag.Value.
+type triState int
+
+const (
+ unset triState = iota
+ setTrue
+ setFalse
+)
+
+func triStateFlag(name string, value triState, usage string) *triState {
+ flag.Var(&value, name, usage)
+ return &value
+}
+
+// triState implements flag.Value, flag.Getter, and flag.boolFlag.
+// They work like boolean flags: we can say vet -printf as well as vet -printf=true
+func (ts *triState) Get() interface{} {
+ return *ts == setTrue
+}
+
+func (ts triState) isTrue() bool {
+ return ts == setTrue
+}
+
+func (ts *triState) Set(value string) error {
+ b, err := strconv.ParseBool(value)
+ if err != nil {
+ return err
+ }
+ if b {
+ *ts = setTrue
+ setTrueCount++
+ } else {
+ *ts = setFalse
+ }
+ return nil
+}
+
+func (ts *triState) String() string {
+ switch *ts {
+ case unset:
+ return "unset"
+ case setTrue:
+ return "true"
+ case setFalse:
+ return "false"
+ }
+ panic("not reached")
+}
+
+func (ts triState) IsBoolFlag() bool {
+ return true
+}
+
+// vet tells whether to report errors for the named check, a flag name.
+func vet(name string) bool {
+ if *testFlag {
+ return true
+ }
+ return report[name].isTrue()
+}
+
+// setExit sets the value for os.Exit when it is called, later. It
+// remembers the highest value.
+func setExit(err int) {
+ if err > exitCode {
+ exitCode = err
+ }
+}
+
+var (
+ // Each of these vars has a corresponding case in (*File).Visit.
+ assignStmt *ast.AssignStmt
+ binaryExpr *ast.BinaryExpr
+ callExpr *ast.CallExpr
+ compositeLit *ast.CompositeLit
+ field *ast.Field
+ funcDecl *ast.FuncDecl
+ funcLit *ast.FuncLit
+ genDecl *ast.GenDecl
+ interfaceType *ast.InterfaceType
+ rangeStmt *ast.RangeStmt
+
+ // checkers is a two-level map.
+ // The outer level is keyed by a nil pointer, one of the AST vars above.
+ // The inner level is keyed by checker name.
+ checkers = make(map[ast.Node]map[string]func(*File, ast.Node))
+)
+
+func register(name, usage string, fn func(*File, ast.Node), types ...ast.Node) {
+ report[name] = triStateFlag(name, unset, usage)
+ for _, typ := range types {
+ m := checkers[typ]
+ if m == nil {
+ m = make(map[string]func(*File, ast.Node))
+ checkers[typ] = m
+ }
+ m[name] = fn
+ }
+}
+
+// Usage is a replacement usage function for the flags package.
+func Usage() {
+ fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0])
+ fmt.Fprintf(os.Stderr, "\tvet [flags] directory...\n")
+ fmt.Fprintf(os.Stderr, "\tvet [flags] files... # Must be a single package\n")
+ fmt.Fprintf(os.Stderr, "For more information run\n")
+ fmt.Fprintf(os.Stderr, "\tgodoc golang.org/x/tools/cmd/vet\n\n")
+ fmt.Fprintf(os.Stderr, "Flags:\n")
+ flag.PrintDefaults()
+ os.Exit(2)
+}
+
+// File is a wrapper for the state of a file used in the parser.
+// The parse tree walkers are all methods of this type.
+type File struct {
+ pkg *Package
+ fset *token.FileSet
+ name string
+ content []byte
+ file *ast.File
+ b bytes.Buffer // for use by methods
+
+ // The objects that are receivers of a "String() string" method.
+ // This is used by the recursiveStringer method in print.go.
+ stringers map[*ast.Object]bool
+
+ // Registered checkers to run.
+ checkers map[ast.Node][]func(*File, ast.Node)
+}
+
+func main() {
+ flag.Usage = Usage
+ flag.Parse()
+
+ // If any flag is set, we run only those checks requested.
+ // If no flags are set true, set all the non-experimental ones not explicitly set (in effect, set the "-all" flag).
+ if setTrueCount == 0 {
+ for name, setting := range report {
+ if *setting == unset && !experimental[name] {
+ *setting = setTrue
+ }
+ }
+ }
+
+ if *printfuncs != "" {
+ for _, name := range strings.Split(*printfuncs, ",") {
+ if len(name) == 0 {
+ flag.Usage()
+ }
+ skip := 0
+ if colon := strings.LastIndex(name, ":"); colon > 0 {
+ var err error
+ skip, err = strconv.Atoi(name[colon+1:])
+ if err != nil {
+ errorf(`illegal format for "Func:N" argument %q; %s`, name, err)
+ }
+ name = name[:colon]
+ }
+ name = strings.ToLower(name)
+ if name[len(name)-1] == 'f' {
+ printfList[name] = skip
+ } else {
+ printList[name] = skip
+ }
+ }
+ }
+
+ if flag.NArg() == 0 {
+ Usage()
+ }
+ dirs := false
+ files := false
+ for _, name := range flag.Args() {
+ // Is it a directory?
+ fi, err := os.Stat(name)
+ if err != nil {
+ warnf("error walking tree: %s", err)
+ continue
+ }
+ if fi.IsDir() {
+ dirs = true
+ } else {
+ files = true
+ }
+ }
+ if dirs && files {
+ Usage()
+ }
+ if dirs {
+ for _, name := range flag.Args() {
+ walkDir(name)
+ }
+ os.Exit(exitCode)
+ }
+ if !doPackage(".", flag.Args()) {
+ warnf("no files checked")
+ }
+ os.Exit(exitCode)
+}
+
+// prefixDirectory places the directory name on the beginning of each name in the list.
+func prefixDirectory(directory string, names []string) {
+ if directory != "." {
+ for i, name := range names {
+ names[i] = filepath.Join(directory, name)
+ }
+ }
+}
+
+// doPackageDir analyzes the single package found in the directory, if there is one,
+// plus a test package, if there is one.
+func doPackageDir(directory string) {
+ pkg, err := build.Default.ImportDir(directory, 0)
+ if err != nil {
+ // If it's just that there are no go source files, that's fine.
+ if _, nogo := err.(*build.NoGoError); nogo {
+ return
+ }
+ // Non-fatal: we are doing a recursive walk and there may be other directories.
+ warnf("cannot process directory %s: %s", directory, err)
+ return
+ }
+ var names []string
+ names = append(names, pkg.GoFiles...)
+ names = append(names, pkg.CgoFiles...)
+ names = append(names, pkg.TestGoFiles...) // These are also in the "foo" package.
+ names = append(names, pkg.SFiles...)
+ prefixDirectory(directory, names)
+ doPackage(directory, names)
+ // Is there also a "foo_test" package? If so, do that one as well.
+ if len(pkg.XTestGoFiles) > 0 {
+ names = pkg.XTestGoFiles
+ prefixDirectory(directory, names)
+ doPackage(directory, names)
+ }
+}
+
+type Package struct {
+ path string
+ defs map[*ast.Ident]types.Object
+ uses map[*ast.Ident]types.Object
+ types map[ast.Expr]types.TypeAndValue
+ spans map[types.Object]Span
+ files []*File
+ typesPkg *types.Package
+}
+
+// doPackage analyzes the single package constructed from the named files.
+// It returns whether any files were checked.
+func doPackage(directory string, names []string) bool {
+ var files []*File
+ var astFiles []*ast.File
+ fs := token.NewFileSet()
+ for _, name := range names {
+ data, err := ioutil.ReadFile(name)
+ if err != nil {
+ // Warn but continue to next package.
+ warnf("%s: %s", name, err)
+ return false
+ }
+ checkBuildTag(name, data)
+ var parsedFile *ast.File
+ if strings.HasSuffix(name, ".go") {
+ parsedFile, err = parser.ParseFile(fs, name, data, 0)
+ if err != nil {
+ warnf("%s: %s", name, err)
+ return false
+ }
+ astFiles = append(astFiles, parsedFile)
+ }
+ files = append(files, &File{fset: fs, content: data, name: name, file: parsedFile})
+ }
+ if len(astFiles) == 0 {
+ return false
+ }
+ pkg := new(Package)
+ pkg.path = astFiles[0].Name.Name
+ pkg.files = files
+ // Type check the package.
+ err := pkg.check(fs, astFiles)
+ if err != nil && *verbose {
+ warnf("%s", err)
+ }
+
+ // Check.
+ chk := make(map[ast.Node][]func(*File, ast.Node))
+ for typ, set := range checkers {
+ for name, fn := range set {
+ if vet(name) {
+ chk[typ] = append(chk[typ], fn)
+ }
+ }
+ }
+ for _, file := range files {
+ file.pkg = pkg
+ file.checkers = chk
+ if file.file != nil {
+ file.walkFile(file.name, file.file)
+ }
+ }
+ asmCheck(pkg)
+ return true
+}
+
+func visit(path string, f os.FileInfo, err error) error {
+ if err != nil {
+ warnf("walk error: %s", err)
+ return err
+ }
+ // One package per directory. Ignore the files themselves.
+ if !f.IsDir() {
+ return nil
+ }
+ doPackageDir(path)
+ return nil
+}
+
+func (pkg *Package) hasFileWithSuffix(suffix string) bool {
+ for _, f := range pkg.files {
+ if strings.HasSuffix(f.name, suffix) {
+ return true
+ }
+ }
+ return false
+}
+
+// walkDir recursively walks the tree looking for Go packages.
+func walkDir(root string) {
+ filepath.Walk(root, visit)
+}
+
+// errorf formats the error to standard error, adding program
+// identification and a newline, and exits.
+func errorf(format string, args ...interface{}) {
+ fmt.Fprintf(os.Stderr, "vet: "+format+"\n", args...)
+ os.Exit(2)
+}
+
+// warnf formats the error to standard error, adding program
+// identification and a newline, but does not exit.
+func warnf(format string, args ...interface{}) {
+ fmt.Fprintf(os.Stderr, "vet: "+format+"\n", args...)
+ setExit(1)
+}
+
+// Println is fmt.Println guarded by -v.
+func Println(args ...interface{}) {
+ if !*verbose {
+ return
+ }
+ fmt.Println(args...)
+}
+
+// Printf is fmt.Printf guarded by -v.
+func Printf(format string, args ...interface{}) {
+ if !*verbose {
+ return
+ }
+ fmt.Printf(format+"\n", args...)
+}
+
+ // Bad reports an error and sets the exit code.
+func (f *File) Bad(pos token.Pos, args ...interface{}) {
+ f.Warn(pos, args...)
+ setExit(1)
+}
+
+// Badf reports a formatted error and sets the exit code.
+func (f *File) Badf(pos token.Pos, format string, args ...interface{}) {
+ f.Warnf(pos, format, args...)
+ setExit(1)
+}
+
+// loc returns a formatted representation of the position.
+func (f *File) loc(pos token.Pos) string {
+ if pos == token.NoPos {
+ return ""
+ }
+ // Do not print columns. Because the pos often points to the start of an
+ // expression instead of the inner part with the actual error, the
+ // precision can mislead.
+ posn := f.fset.Position(pos)
+ return fmt.Sprintf("%s:%d: ", posn.Filename, posn.Line)
+}
+
+// Warn reports an error but does not set the exit code.
+func (f *File) Warn(pos token.Pos, args ...interface{}) {
+ fmt.Fprint(os.Stderr, f.loc(pos)+fmt.Sprintln(args...))
+}
+
+// Warnf reports a formatted error but does not set the exit code.
+func (f *File) Warnf(pos token.Pos, format string, args ...interface{}) {
+ fmt.Fprintf(os.Stderr, f.loc(pos)+format+"\n", args...)
+}
+
+// walkFile walks the file's tree.
+func (f *File) walkFile(name string, file *ast.File) {
+ Println("Checking file", name)
+ ast.Walk(f, file)
+}
+
+// Visit implements the ast.Visitor interface.
+func (f *File) Visit(node ast.Node) ast.Visitor {
+ var key ast.Node
+ switch node.(type) {
+ case *ast.AssignStmt:
+ key = assignStmt
+ case *ast.BinaryExpr:
+ key = binaryExpr
+ case *ast.CallExpr:
+ key = callExpr
+ case *ast.CompositeLit:
+ key = compositeLit
+ case *ast.Field:
+ key = field
+ case *ast.FuncDecl:
+ key = funcDecl
+ case *ast.FuncLit:
+ key = funcLit
+ case *ast.GenDecl:
+ key = genDecl
+ case *ast.InterfaceType:
+ key = interfaceType
+ case *ast.RangeStmt:
+ key = rangeStmt
+ }
+ for _, fn := range f.checkers[key] {
+ fn(f, node)
+ }
+ return f
+}
+
+// gofmt returns a string representation of the expression.
+func (f *File) gofmt(x ast.Expr) string {
+ f.b.Reset()
+ printer.Fprint(&f.b, f.fset, x)
+ return f.b.String()
+}
diff --git a/llgo/third_party/go.tools/cmd/vet/method.go b/llgo/third_party/go.tools/cmd/vet/method.go
new file mode 100644
index 0000000000000000000000000000000000000000..00949df437295c5505ec960c6f869105fb1d44dc
--- /dev/null
+++ b/llgo/third_party/go.tools/cmd/vet/method.go
@@ -0,0 +1,182 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains the code to check canonical methods.
+
+package main
+
+import (
+ "fmt"
+ "go/ast"
+ "go/printer"
+ "strings"
+)
+
+func init() {
+ register("methods",
+ "check that canonically named methods are canonically defined",
+ checkCanonicalMethod,
+ funcDecl, interfaceType)
+}
+
+type MethodSig struct {
+ args []string
+ results []string
+}
+
+// canonicalMethods lists the input and output types for Go methods
+// that are checked using dynamic interface checks. Because the
+// checks are dynamic, such methods would not cause a compile error
+// if they have the wrong signature: instead the dynamic check would
+// fail, sometimes mysteriously. If a method is found with a name listed
+// here but not the input/output types listed here, vet complains.
+//
+// A few of the canonical methods have very common names.
+// For example, a type might implement a Scan method that
+// has nothing to do with fmt.Scanner, but we still want to check
+// the methods that are intended to implement fmt.Scanner.
+// To do that, the arguments that have a = prefix are treated as
+// signals that the canonical meaning is intended: if a Scan
+// method doesn't have a fmt.ScanState as its first argument,
+// we let it go. But if it does have a fmt.ScanState, then the
+// rest has to match.
+var canonicalMethods = map[string]MethodSig{
+ // "Flush": {{}, {"error"}}, // http.Flusher and jpeg.writer conflict
+ "Format": {[]string{"=fmt.State", "rune"}, []string{}}, // fmt.Formatter
+ "GobDecode": {[]string{"[]byte"}, []string{"error"}}, // gob.GobDecoder
+ "GobEncode": {[]string{}, []string{"[]byte", "error"}}, // gob.GobEncoder
+ "MarshalJSON": {[]string{}, []string{"[]byte", "error"}}, // json.Marshaler
+ "MarshalXML": {[]string{"*xml.Encoder", "xml.StartElement"}, []string{"error"}}, // xml.Marshaler
+ "Peek": {[]string{"=int"}, []string{"[]byte", "error"}}, // image.reader (matching bufio.Reader)
+ "ReadByte": {[]string{}, []string{"byte", "error"}}, // io.ByteReader
+ "ReadFrom": {[]string{"=io.Reader"}, []string{"int64", "error"}}, // io.ReaderFrom
+ "ReadRune": {[]string{}, []string{"rune", "int", "error"}}, // io.RuneReader
+ "Scan": {[]string{"=fmt.ScanState", "rune"}, []string{"error"}}, // fmt.Scanner
+ "Seek": {[]string{"=int64", "int"}, []string{"int64", "error"}}, // io.Seeker
+ "UnmarshalJSON": {[]string{"[]byte"}, []string{"error"}}, // json.Unmarshaler
+ "UnmarshalXML": {[]string{"*xml.Decoder", "xml.StartElement"}, []string{"error"}}, // xml.Unmarshaler
+ "UnreadByte": {[]string{}, []string{"error"}},
+ "UnreadRune": {[]string{}, []string{"error"}},
+ "WriteByte": {[]string{"byte"}, []string{"error"}}, // jpeg.writer (matching bufio.Writer)
+ "WriteTo": {[]string{"=io.Writer"}, []string{"int64", "error"}}, // io.WriterTo
+}
+
+func checkCanonicalMethod(f *File, node ast.Node) {
+ switch n := node.(type) {
+ case *ast.FuncDecl:
+ if n.Recv != nil {
+ canonicalMethod(f, n.Name, n.Type)
+ }
+ case *ast.InterfaceType:
+ for _, field := range n.Methods.List {
+ for _, id := range field.Names {
+ canonicalMethod(f, id, field.Type.(*ast.FuncType))
+ }
+ }
+ }
+}
+
+func canonicalMethod(f *File, id *ast.Ident, t *ast.FuncType) {
+ // Expected input/output.
+ expect, ok := canonicalMethods[id.Name]
+ if !ok {
+ return
+ }
+
+ // Actual input/output
+ args := typeFlatten(t.Params.List)
+ var results []ast.Expr
+ if t.Results != nil {
+ results = typeFlatten(t.Results.List)
+ }
+
+ // Do the =s (if any) all match?
+ if !f.matchParams(expect.args, args, "=") || !f.matchParams(expect.results, results, "=") {
+ return
+ }
+
+ // Everything must match.
+ if !f.matchParams(expect.args, args, "") || !f.matchParams(expect.results, results, "") {
+ expectFmt := id.Name + "(" + argjoin(expect.args) + ")"
+ if len(expect.results) == 1 {
+ expectFmt += " " + argjoin(expect.results)
+ } else if len(expect.results) > 1 {
+ expectFmt += " (" + argjoin(expect.results) + ")"
+ }
+
+ f.b.Reset()
+ if err := printer.Fprint(&f.b, f.fset, t); err != nil {
+ fmt.Fprintf(&f.b, "<%s>", err)
+ }
+ actual := f.b.String()
+ actual = strings.TrimPrefix(actual, "func")
+ actual = id.Name + actual
+
+ f.Badf(id.Pos(), "method %s should have signature %s", actual, expectFmt)
+ }
+}
+
+func argjoin(x []string) string {
+ y := make([]string, len(x))
+ for i, s := range x {
+ if s[0] == '=' {
+ s = s[1:]
+ }
+ y[i] = s
+ }
+ return strings.Join(y, ", ")
+}
+
+// Turn parameter list into slice of types
+// (in the ast, types are Exprs).
+// Have to handle f(int, bool) and f(x, y, z int)
+// so not a simple 1-to-1 conversion.
+func typeFlatten(l []*ast.Field) []ast.Expr {
+ var t []ast.Expr
+ for _, f := range l {
+ if len(f.Names) == 0 {
+ t = append(t, f.Type)
+ continue
+ }
+ for _ = range f.Names {
+ t = append(t, f.Type)
+ }
+ }
+ return t
+}
+
+// Does each type in expect with the given prefix match the corresponding type in actual?
+func (f *File) matchParams(expect []string, actual []ast.Expr, prefix string) bool {
+ for i, x := range expect {
+ if !strings.HasPrefix(x, prefix) {
+ continue
+ }
+ if i >= len(actual) {
+ return false
+ }
+ if !f.matchParamType(x, actual[i]) {
+ return false
+ }
+ }
+ if prefix == "" && len(actual) > len(expect) {
+ return false
+ }
+ return true
+}
+
+// Does this one type match?
+func (f *File) matchParamType(expect string, actual ast.Expr) bool {
+ if strings.HasPrefix(expect, "=") {
+ expect = expect[1:]
+ }
+ // Strip package name if we're in that package.
+ if n := len(f.file.Name.Name); len(expect) > n && expect[:n] == f.file.Name.Name && expect[n] == '.' {
+ expect = expect[n+1:]
+ }
+
+ // Overkill but easy.
+ f.b.Reset()
+ printer.Fprint(&f.b, f.fset, actual)
+ return f.b.String() == expect
+}
diff --git a/llgo/third_party/go.tools/cmd/vet/nilfunc.go b/llgo/third_party/go.tools/cmd/vet/nilfunc.go
new file mode 100644
index 0000000000000000000000000000000000000000..0ed874fd36fd69b315ad62ae1d2f9ca15d62beb0
--- /dev/null
+++ b/llgo/third_party/go.tools/cmd/vet/nilfunc.go
@@ -0,0 +1,68 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+This file contains the code to check for useless function comparisons.
+A useless comparison is one like f == nil as opposed to f() == nil.
+*/
+
+package main
+
+import (
+ "go/ast"
+ "go/token"
+
+ "llvm.org/llgo/third_party/go.tools/go/types"
+)
+
+func init() {
+ register("nilfunc",
+ "check for comparisons between functions and nil",
+ checkNilFuncComparison,
+ binaryExpr)
+}
+
+func checkNilFuncComparison(f *File, node ast.Node) {
+ e := node.(*ast.BinaryExpr)
+
+ // Only want == or != comparisons.
+ if e.Op != token.EQL && e.Op != token.NEQ {
+ return
+ }
+
+ // Only want comparisons with a nil identifier on one side.
+ var e2 ast.Expr
+ switch {
+ case f.isNil(e.X):
+ e2 = e.Y
+ case f.isNil(e.Y):
+ e2 = e.X
+ default:
+ return
+ }
+
+ // Only want identifiers or selector expressions.
+ var obj types.Object
+ switch v := e2.(type) {
+ case *ast.Ident:
+ obj = f.pkg.uses[v]
+ case *ast.SelectorExpr:
+ obj = f.pkg.uses[v.Sel]
+ default:
+ return
+ }
+
+ // Only want functions.
+ if _, ok := obj.(*types.Func); !ok {
+ return
+ }
+
+ f.Badf(e.Pos(), "comparison of function %v %v nil is always %v", obj.Name(), e.Op, e.Op == token.NEQ)
+}
+
+// isNil reports whether the provided expression is the built-in nil
+// identifier.
+func (f *File) isNil(e ast.Expr) bool {
+ return f.pkg.types[e].Type == types.Typ[types.UntypedNil]
+}
diff --git a/llgo/third_party/go.tools/cmd/vet/print.go b/llgo/third_party/go.tools/cmd/vet/print.go
new file mode 100644
index 0000000000000000000000000000000000000000..572f615f90798dbac56793afd38ff0d188c50a48
--- /dev/null
+++ b/llgo/third_party/go.tools/cmd/vet/print.go
@@ -0,0 +1,557 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains the printf-checker.
+
+package main
+
+import (
+ "bytes"
+ "flag"
+ "go/ast"
+ "go/token"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+
+ "llvm.org/llgo/third_party/go.tools/go/exact"
+ "llvm.org/llgo/third_party/go.tools/go/types"
+)
+
+var printfuncs = flag.String("printfuncs", "", "comma-separated list of print function names to check")
+
+func init() {
+ register("printf",
+ "check printf-like invocations",
+ checkFmtPrintfCall,
+ funcDecl, callExpr)
+}
+
+// printfList records the formatted-print functions. The value is the location
+// of the format parameter. Names are lower-cased so the lookup is
+// case insensitive.
+var printfList = map[string]int{
+ "errorf": 0,
+ "fatalf": 0,
+ "fprintf": 1,
+ "logf": 0,
+ "panicf": 0,
+ "printf": 0,
+ "sprintf": 0,
+}
+
+// printList records the unformatted-print functions. The value is the location
+// of the first parameter to be printed. Names are lower-cased so the lookup is
+// case insensitive.
+var printList = map[string]int{
+ "error": 0,
+ "fatal": 0,
+ "fprint": 1, "fprintln": 1,
+ "log": 0,
+ "panic": 0, "panicln": 0,
+ "print": 0, "println": 0,
+ "sprint": 0, "sprintln": 0,
+}
+
+ // checkFmtPrintfCall triggers the print-specific checks if the call invokes a print function.
+func checkFmtPrintfCall(f *File, node ast.Node) {
+ if d, ok := node.(*ast.FuncDecl); ok && isStringer(f, d) {
+ // Remember we saw this.
+ if f.stringers == nil {
+ f.stringers = make(map[*ast.Object]bool)
+ }
+ if l := d.Recv.List; len(l) == 1 {
+ if n := l[0].Names; len(n) == 1 {
+ f.stringers[n[0].Obj] = true
+ }
+ }
+ return
+ }
+
+ call, ok := node.(*ast.CallExpr)
+ if !ok {
+ return
+ }
+ var Name string
+ switch x := call.Fun.(type) {
+ case *ast.Ident:
+ Name = x.Name
+ case *ast.SelectorExpr:
+ Name = x.Sel.Name
+ default:
+ return
+ }
+
+ name := strings.ToLower(Name)
+ if skip, ok := printfList[name]; ok {
+ f.checkPrintf(call, Name, skip)
+ return
+ }
+ if skip, ok := printList[name]; ok {
+ f.checkPrint(call, Name, skip)
+ return
+ }
+}
+
+// isStringer returns true if the provided declaration is a "String() string"
+// method, an implementation of fmt.Stringer.
+func isStringer(f *File, d *ast.FuncDecl) bool {
+ return d.Recv != nil && d.Name.Name == "String" && d.Type.Results != nil &&
+ len(d.Type.Params.List) == 0 && len(d.Type.Results.List) == 1 &&
+ f.pkg.types[d.Type.Results.List[0].Type].Type == types.Typ[types.String]
+}
+
+// formatState holds the parsed representation of a printf directive such as "%3.*[4]d".
+// It is constructed by parsePrintfVerb.
+type formatState struct {
+ verb rune // the format verb: 'd' for "%d"
+ format string // the full format directive from % through verb, "%.3d".
+ name string // Printf, Sprintf etc.
+ flags []byte // the list of # + etc.
+ argNums []int // the successive argument numbers that are consumed, adjusted to refer to actual arg in call
+ indexed bool // whether an indexing expression appears: %[1]d.
+ firstArg int // Index of first argument after the format in the Printf call.
+ // Used only during parse.
+ file *File
+ call *ast.CallExpr
+ argNum int // Which argument we're expecting to format now.
+ indexPending bool // Whether we have an indexed argument that has not resolved.
+ nbytes int // number of bytes of the format string consumed.
+}
+
+// checkPrintf checks a call to a formatted print routine such as Printf.
+// call.Args[formatIndex] is (well, should be) the format argument.
+func (f *File) checkPrintf(call *ast.CallExpr, name string, formatIndex int) {
+ if formatIndex >= len(call.Args) {
+ f.Bad(call.Pos(), "too few arguments in call to", name)
+ return
+ }
+ lit := f.pkg.types[call.Args[formatIndex]].Value
+ if lit == nil {
+ if *verbose {
+ f.Warn(call.Pos(), "can't check non-constant format in call to", name)
+ }
+ return
+ }
+ if lit.Kind() != exact.String {
+ f.Badf(call.Pos(), "constant %v not a string in call to %s", lit, name)
+ return
+ }
+ format := exact.StringVal(lit)
+ firstArg := formatIndex + 1 // Arguments are immediately after format string.
+ if !strings.Contains(format, "%") {
+ if len(call.Args) > firstArg {
+ f.Badf(call.Pos(), "no formatting directive in %s call", name)
+ }
+ return
+ }
+ // Hard part: check formats against args.
+ argNum := firstArg
+ indexed := false
+ for i, w := 0, 0; i < len(format); i += w {
+ w = 1
+ if format[i] == '%' {
+ state := f.parsePrintfVerb(call, name, format[i:], firstArg, argNum)
+ if state == nil {
+ return
+ }
+ w = len(state.format)
+ if state.indexed {
+ indexed = true
+ }
+ if !f.okPrintfArg(call, state) { // One error per format is enough.
+ return
+ }
+ if len(state.argNums) > 0 {
+ // Continue with the next sequential argument.
+ argNum = state.argNums[len(state.argNums)-1] + 1
+ }
+ }
+ }
+ // Dotdotdot is hard.
+ if call.Ellipsis.IsValid() && argNum >= len(call.Args)-1 {
+ return
+ }
+ // If the arguments were direct indexed, we assume the programmer knows what's up.
+ // Otherwise, there should be no leftover arguments.
+ if !indexed && argNum != len(call.Args) {
+ expect := argNum - firstArg
+ numArgs := len(call.Args) - firstArg
+ f.Badf(call.Pos(), "wrong number of args for format in %s call: %d needed but %d args", name, expect, numArgs)
+ }
+}
+
+// parseFlags accepts any printf flags.
+func (s *formatState) parseFlags() {
+ for s.nbytes < len(s.format) {
+ switch c := s.format[s.nbytes]; c {
+ case '#', '0', '+', '-', ' ':
+ s.flags = append(s.flags, c)
+ s.nbytes++
+ default:
+ return
+ }
+ }
+}
+
+// scanNum advances through a decimal number if present.
+func (s *formatState) scanNum() {
+ for ; s.nbytes < len(s.format); s.nbytes++ {
+ c := s.format[s.nbytes]
+ if c < '0' || '9' < c {
+ return
+ }
+ }
+}
+
+// parseIndex scans an index expression. It returns false if there is a syntax error.
+func (s *formatState) parseIndex() bool {
+ if s.nbytes == len(s.format) || s.format[s.nbytes] != '[' {
+ return true
+ }
+ // Argument index present.
+ s.indexed = true
+ s.nbytes++ // skip '['
+ start := s.nbytes
+ s.scanNum()
+ if s.nbytes == len(s.format) || s.nbytes == start || s.format[s.nbytes] != ']' {
+ s.file.Badf(s.call.Pos(), "illegal syntax for printf argument index")
+ return false
+ }
+ arg32, err := strconv.ParseInt(s.format[start:s.nbytes], 10, 32)
+ if err != nil {
+ s.file.Badf(s.call.Pos(), "illegal syntax for printf argument index: %s", err)
+ return false
+ }
+ s.nbytes++ // skip ']'
+ arg := int(arg32)
+ arg += s.firstArg - 1 // We want to zero-index the actual arguments.
+ s.argNum = arg
+ s.indexPending = true
+ return true
+}
+
+// parseNum scans a width or precision (or *). It returns false if there's a bad index expression.
+func (s *formatState) parseNum() bool {
+ if s.nbytes < len(s.format) && s.format[s.nbytes] == '*' {
+ if s.indexPending { // Absorb it.
+ s.indexPending = false
+ }
+ s.nbytes++
+ s.argNums = append(s.argNums, s.argNum)
+ s.argNum++
+ } else {
+ s.scanNum()
+ }
+ return true
+}
+
+// parsePrecision scans for a precision. It returns false if there's a bad index expression.
+func (s *formatState) parsePrecision() bool {
+ // If there's a period, there may be a precision.
+ if s.nbytes < len(s.format) && s.format[s.nbytes] == '.' {
+ s.flags = append(s.flags, '.') // Treat precision as a flag.
+ s.nbytes++
+ if !s.parseIndex() {
+ return false
+ }
+ if !s.parseNum() {
+ return false
+ }
+ }
+ return true
+}
+
+ // parsePrintfVerb looks at the formatting directive that begins the format string
+// and returns a formatState that encodes what the directive wants, without looking
+// at the actual arguments present in the call. The result is nil if there is an error.
+func (f *File) parsePrintfVerb(call *ast.CallExpr, name, format string, firstArg, argNum int) *formatState {
+ state := &formatState{
+ format: format,
+ name: name,
+ flags: make([]byte, 0, 5),
+ argNum: argNum,
+ argNums: make([]int, 0, 1),
+ nbytes: 1, // There's guaranteed to be a percent sign.
+ indexed: false,
+ firstArg: firstArg,
+ file: f,
+ call: call,
+ }
+ // There may be flags.
+ state.parseFlags()
+ indexPending := false
+ // There may be an index.
+ if !state.parseIndex() {
+ return nil
+ }
+ // There may be a width.
+ if !state.parseNum() {
+ return nil
+ }
+ // There may be a precision.
+ if !state.parsePrecision() {
+ return nil
+ }
+ // Now a verb, possibly prefixed by an index (which we may already have).
+ if !indexPending && !state.parseIndex() {
+ return nil
+ }
+ if state.nbytes == len(state.format) {
+ f.Badf(call.Pos(), "missing verb at end of format string in %s call", name)
+ return nil
+ }
+ verb, w := utf8.DecodeRuneInString(state.format[state.nbytes:])
+ state.verb = verb
+ state.nbytes += w
+ if verb != '%' {
+ state.argNums = append(state.argNums, state.argNum)
+ }
+ state.format = state.format[:state.nbytes]
+ return state
+}
+
+// printfArgType encodes the types of expressions a printf verb accepts. It is a bitmask.
+type printfArgType int
+
+const (
+ argBool printfArgType = 1 << iota
+ argInt
+ argRune
+ argString
+ argFloat
+ argComplex
+ argPointer
+ anyType printfArgType = ^0
+)
+
+type printVerb struct {
+ verb rune // User may provide verb through Formatter; could be a rune.
+ flags string // known flags are all ASCII
+ typ printfArgType
+}
+
+// Common flag sets for printf verbs.
+const (
+ noFlag = ""
+ numFlag = " -+.0"
+ sharpNumFlag = " -+.0#"
+ allFlags = " -+.0#"
+)
+
+// printVerbs identifies which flags are known to printf for each verb.
+// TODO: A type that implements Formatter may do what it wants, and vet
+// will complain incorrectly.
+var printVerbs = []printVerb{
+ // '-' is a width modifier, always valid.
+ // '.' is a precision for float, max width for strings.
+ // '+' is required sign for numbers, Go format for %v.
+ // '#' is alternate format for several verbs.
+ // ' ' is a spacer for numbers.
+ {'%', noFlag, 0},
+ {'b', numFlag, argInt | argFloat | argComplex},
+ {'c', "-", argRune | argInt},
+ {'d', numFlag, argInt},
+ {'e', numFlag, argFloat | argComplex},
+ {'E', numFlag, argFloat | argComplex},
+ {'f', numFlag, argFloat | argComplex},
+ {'F', numFlag, argFloat | argComplex},
+ {'g', numFlag, argFloat | argComplex},
+ {'G', numFlag, argFloat | argComplex},
+ {'o', sharpNumFlag, argInt},
+ {'p', "-#", argPointer},
+ {'q', " -+.0#", argRune | argInt | argString},
+ {'s', " -+.0", argString},
+ {'t', "-", argBool},
+ {'T', "-", anyType},
+ {'U', "-#", argRune | argInt},
+ {'v', allFlags, anyType},
+ {'x', sharpNumFlag, argRune | argInt | argString},
+ {'X', sharpNumFlag, argRune | argInt | argString},
+}
+
+// okPrintfArg compares the formatState to the arguments actually present,
+// reporting any discrepancies it can discern. If the final argument is passed
+// with an ellipsis (slice...), there's little it can do for that.
+func (f *File) okPrintfArg(call *ast.CallExpr, state *formatState) (ok bool) {
+ var v printVerb
+ found := false
+ // Linear scan is fast enough for a small list.
+ for _, v = range printVerbs {
+ if v.verb == state.verb {
+ found = true
+ break
+ }
+ }
+ if !found {
+ f.Badf(call.Pos(), "unrecognized printf verb %q", state.verb)
+ return false
+ }
+ for _, flag := range state.flags {
+ if !strings.ContainsRune(v.flags, rune(flag)) {
+ f.Badf(call.Pos(), "unrecognized printf flag for verb %q: %q", state.verb, flag)
+ return false
+ }
+ }
+ // Verb is good. If len(state.argNums)>trueArgs, we have something like %.*s and all
+ // but the final arg must be an integer.
+ trueArgs := 1
+ if state.verb == '%' {
+ trueArgs = 0
+ }
+ nargs := len(state.argNums)
+ for i := 0; i < nargs-trueArgs; i++ {
+ argNum := state.argNums[i]
+ if !f.argCanBeChecked(call, i, true, state) {
+ return
+ }
+ arg := call.Args[argNum]
+ if !f.matchArgType(argInt, nil, arg) {
+ f.Badf(call.Pos(), "arg %s for * in printf format not of type int", f.gofmt(arg))
+ return false
+ }
+ }
+ if state.verb == '%' {
+ return true
+ }
+ argNum := state.argNums[len(state.argNums)-1]
+ if !f.argCanBeChecked(call, len(state.argNums)-1, false, state) {
+ return false
+ }
+ arg := call.Args[argNum]
+ if !f.matchArgType(v.typ, nil, arg) {
+ typeString := ""
+ if typ := f.pkg.types[arg].Type; typ != nil {
+ typeString = typ.String()
+ }
+ f.Badf(call.Pos(), "arg %s for printf verb %%%c of wrong type: %s", f.gofmt(arg), state.verb, typeString)
+ return false
+ }
+ if v.typ&argString != 0 && v.verb != 'T' && !bytes.Contains(state.flags, []byte{'#'}) && f.recursiveStringer(arg) {
+ f.Badf(call.Pos(), "arg %s for printf causes recursive call to String method", f.gofmt(arg))
+ return false
+ }
+ return true
+}
+
+// recursiveStringer reports whether the provided argument is r or &r for the
+// fmt.Stringer receiver identifier r.
+func (f *File) recursiveStringer(e ast.Expr) bool {
+ if len(f.stringers) == 0 {
+ return false
+ }
+ var obj *ast.Object
+ switch e := e.(type) {
+ case *ast.Ident:
+ obj = e.Obj
+ case *ast.UnaryExpr:
+ if id, ok := e.X.(*ast.Ident); ok && e.Op == token.AND {
+ obj = id.Obj
+ }
+ }
+
+ // It's unlikely to be a recursive stringer if it has a Format method.
+ if typ := f.pkg.types[e].Type; typ != nil {
+ // Not a perfect match; see issue 6259.
+ if f.hasMethod(typ, "Format") {
+ return false
+ }
+ }
+
+ // We compare the underlying Object, which checks that the identifier
+ // is the one we declared as the receiver for the String method in
+ // which this printf appears.
+ return f.stringers[obj]
+}
+
+// argCanBeChecked reports whether the specified argument is statically present;
+// it may be beyond the list of arguments or in a terminal variadic (slice...) argument,
+// which means we can't see it.
+func (f *File) argCanBeChecked(call *ast.CallExpr, formatArg int, isStar bool, state *formatState) bool {
+ argNum := state.argNums[formatArg]
+ if argNum < 0 {
+ // Shouldn't happen, so catch it with prejudice.
+ panic("negative arg num")
+ }
+ if argNum < len(call.Args)-1 {
+ return true // Always OK.
+ }
+ if call.Ellipsis.IsValid() {
+ return false // We just can't tell; there could be many more arguments.
+ }
+ if argNum < len(call.Args) {
+ return true
+ }
+ // There are bad indexes in the format or there are fewer arguments than the format needs.
+ // This is the argument number relative to the format: Printf("%s", "hi") will give 1 for the "hi".
+ arg := argNum - state.firstArg + 1 // People think of arguments as 1-indexed.
+ f.Badf(call.Pos(), `missing argument for %s("%s"): format reads arg %d, have only %d args`, state.name, state.format, arg, len(call.Args)-state.firstArg)
+ return false
+}
+
+// checkPrint checks a call to an unformatted print routine such as Println.
+// call.Args[firstArg] is the first argument to be printed.
+func (f *File) checkPrint(call *ast.CallExpr, name string, firstArg int) {
+ isLn := strings.HasSuffix(name, "ln")
+ isF := strings.HasPrefix(name, "F")
+ args := call.Args
+ if name == "Log" && len(args) > 0 {
+ // Special case: Don't complain about math.Log or cmplx.Log.
+ // Not strictly necessary because the only complaint likely is for Log("%d")
+ // but it feels wrong to check that math.Log is a good print function.
+ if sel, ok := args[0].(*ast.SelectorExpr); ok {
+ if x, ok := sel.X.(*ast.Ident); ok {
+ if x.Name == "math" || x.Name == "cmplx" {
+ return
+ }
+ }
+ }
+ }
+ // check for Println(os.Stderr, ...)
+ if firstArg == 0 && !isF && len(args) > 0 {
+ if sel, ok := args[0].(*ast.SelectorExpr); ok {
+ if x, ok := sel.X.(*ast.Ident); ok {
+ if x.Name == "os" && strings.HasPrefix(sel.Sel.Name, "Std") {
+ f.Badf(call.Pos(), "first argument to %s is %s.%s", name, x.Name, sel.Sel.Name)
+ }
+ }
+ }
+ }
+ if len(args) <= firstArg {
+ // If we have a call to a method called Error that satisfies the error interface,
+ // then it's ok. Otherwise it's something like (*T).Error from the testing package
+ // and we need to check it.
+ if name == "Error" && f.isErrorMethodCall(call) {
+ return
+ }
+ // If it's an Error call now, it's probably for printing errors.
+ if !isLn {
+ // Check the signature to be sure: there are niladic functions called "error".
+ if firstArg != 0 || f.numArgsInSignature(call) != firstArg {
+ f.Badf(call.Pos(), "no args in %s call", name)
+ }
+ }
+ return
+ }
+ arg := args[firstArg]
+ if lit, ok := arg.(*ast.BasicLit); ok && lit.Kind == token.STRING {
+ if strings.Contains(lit.Value, "%") {
+ f.Badf(call.Pos(), "possible formatting directive in %s call", name)
+ }
+ }
+ if isLn {
+ // The last item, if a string, should not have a newline.
+ arg = args[len(call.Args)-1]
+ if lit, ok := arg.(*ast.BasicLit); ok && lit.Kind == token.STRING {
+ if strings.HasSuffix(lit.Value, `\n"`) {
+ f.Badf(call.Pos(), "%s call ends with newline", name)
+ }
+ }
+ }
+ for _, arg := range args {
+ if f.recursiveStringer(arg) {
+ f.Badf(call.Pos(), "arg %s for print causes recursive call to String method", f.gofmt(arg))
+ }
+ }
+}
diff --git a/llgo/third_party/go.tools/cmd/vet/rangeloop.go b/llgo/third_party/go.tools/cmd/vet/rangeloop.go
new file mode 100644
index 0000000000000000000000000000000000000000..96e2ca806267591bbe8d76dc106326bc7e801505
--- /dev/null
+++ b/llgo/third_party/go.tools/cmd/vet/rangeloop.go
@@ -0,0 +1,70 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+This file contains the code to check range loop variables bound inside function
+literals that are deferred or launched in new goroutines. We only check
+instances where the defer or go statement is the last statement in the loop
+body, as otherwise we would need whole program analysis.
+
+For example:
+
+ for i, v := range s {
+ go func() {
+ println(i, v) // not what you might expect
+ }()
+ }
+
+See: http://golang.org/doc/go_faq.html#closures_and_goroutines
+*/
+
+package main
+
+import "go/ast"
+
+func init() {
+ register("rangeloops",
+ "check that range loop variables are used correctly",
+ checkRangeLoop,
+ rangeStmt)
+}
+
+// checkRangeLoop walks the body of the provided range statement, checking if
+// its index or value variables are used unsafely inside goroutines or deferred
+// function literals.
+func checkRangeLoop(f *File, node ast.Node) {
+ n := node.(*ast.RangeStmt)
+ key, _ := n.Key.(*ast.Ident)
+ val, _ := n.Value.(*ast.Ident)
+ if key == nil && val == nil {
+ return
+ }
+ sl := n.Body.List
+ if len(sl) == 0 {
+ return
+ }
+ var last *ast.CallExpr
+ switch s := sl[len(sl)-1].(type) {
+ case *ast.GoStmt:
+ last = s.Call
+ case *ast.DeferStmt:
+ last = s.Call
+ default:
+ return
+ }
+ lit, ok := last.Fun.(*ast.FuncLit)
+ if !ok {
+ return
+ }
+ ast.Inspect(lit.Body, func(n ast.Node) bool {
+ id, ok := n.(*ast.Ident)
+ if !ok || id.Obj == nil {
+ return true
+ }
+ if key != nil && id.Obj == key.Obj || val != nil && id.Obj == val.Obj {
+ f.Bad(id.Pos(), "range variable", id.Name, "captured by func literal")
+ }
+ return true
+ })
+}
diff --git a/llgo/third_party/go.tools/cmd/vet/shadow.go b/llgo/third_party/go.tools/cmd/vet/shadow.go
new file mode 100644
index 0000000000000000000000000000000000000000..1ba360a782504893f9478ca8732e3a9634b52547
--- /dev/null
+++ b/llgo/third_party/go.tools/cmd/vet/shadow.go
@@ -0,0 +1,245 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+This file contains the code to check for shadowed variables.
+A shadowed variable is a variable declared in an inner scope
+with the same name and type as a variable in an outer scope,
+and where the outer variable is mentioned after the inner one
+is declared.
+
+(This definition can be refined; the module generates too many
+false positives and is not yet enabled by default.)
+
+For example:
+
+ func BadRead(f *os.File, buf []byte) error {
+ var err error
+ for {
+ n, err := f.Read(buf) // shadows the function variable 'err'
+ if err != nil {
+ break // causes return of wrong value
+ }
+ foo(buf)
+ }
+ return err
+ }
+
+*/
+
+package main
+
+import (
+ "flag"
+ "go/ast"
+ "go/token"
+
+ "llvm.org/llgo/third_party/go.tools/go/types"
+)
+
+var strictShadowing = flag.Bool("shadowstrict", false, "whether to be strict about shadowing; can be noisy")
+
+func init() {
+ register("shadow",
+ "check for shadowed variables (experimental; must be set explicitly)",
+ checkShadow,
+ assignStmt, genDecl)
+ experimental["shadow"] = true
+}
+
+func checkShadow(f *File, node ast.Node) {
+ switch n := node.(type) {
+ case *ast.AssignStmt:
+ checkShadowAssignment(f, n)
+ case *ast.GenDecl:
+ checkShadowDecl(f, n)
+ }
+}
+
+// Span stores the minimum range of byte positions in the file in which a
+// given variable (types.Object) is mentioned. It is lexically defined: it spans
+// from the beginning of its first mention to the end of its last mention.
+// A variable is considered shadowed (if *strictShadowing is off) only if the
+// shadowing variable is declared within the span of the shadowed variable.
+// In other words, if a variable is shadowed but not used after the shadowed
+// variable is declared, it is inconsequential and not worth complaining about.
+// This simple check dramatically reduces the nuisance rate for the shadowing
+// check, at least until something cleverer comes along.
+//
+// One wrinkle: A "naked return" is a silent use of a variable that the Span
+// will not capture, but the compilers catch naked returns of shadowed
+// variables so we don't need to.
+//
+// Cases this gets wrong (TODO):
+// - If a for loop's continuation statement mentions a variable redeclared in
+// the block, we should complain about it but don't.
+// - A variable declared inside a function literal can falsely be identified
+// as shadowing a variable in the outer function.
+//
+type Span struct {
+ min token.Pos
+ max token.Pos
+}
+
+// contains reports whether the position is inside the span.
+func (s Span) contains(pos token.Pos) bool {
+ return s.min <= pos && pos < s.max
+}
+
+// growSpan expands the span for the object to contain the instance represented
+// by the identifier.
+func (pkg *Package) growSpan(ident *ast.Ident, obj types.Object) {
+ if *strictShadowing {
+ return // No need
+ }
+ pos := ident.Pos()
+ end := ident.End()
+ span, ok := pkg.spans[obj]
+ if ok {
+ if span.min > pos {
+ span.min = pos
+ }
+ if span.max < end {
+ span.max = end
+ }
+ } else {
+ span = Span{pos, end}
+ }
+ pkg.spans[obj] = span
+}
+
+// checkShadowAssignment checks for shadowing in a short variable declaration.
+func checkShadowAssignment(f *File, a *ast.AssignStmt) {
+ if a.Tok != token.DEFINE {
+ return
+ }
+ if f.idiomaticShortRedecl(a) {
+ return
+ }
+ for _, expr := range a.Lhs {
+ ident, ok := expr.(*ast.Ident)
+ if !ok {
+ f.Badf(expr.Pos(), "invalid AST: short variable declaration of non-identifier")
+ return
+ }
+ checkShadowing(f, ident)
+ }
+}
+
+// idiomaticShortRedecl reports whether this short declaration can be ignored for
+// the purposes of shadowing, that is, that any redeclarations it contains are deliberate.
+func (f *File) idiomaticShortRedecl(a *ast.AssignStmt) bool {
+ // Don't complain about deliberate redeclarations of the form
+ // i := i
+ // Such constructs are idiomatic in range loops to create a new variable
+ // for each iteration. Another example is
+ // switch n := n.(type)
+ if len(a.Rhs) != len(a.Lhs) {
+ return false
+ }
+ // We know it's an assignment, so the LHS must be all identifiers. (We check anyway.)
+ for i, expr := range a.Lhs {
+ lhs, ok := expr.(*ast.Ident)
+ if !ok {
+ f.Badf(expr.Pos(), "invalid AST: short variable declaration of non-identifier")
+ return true // Don't do any more processing.
+ }
+ switch rhs := a.Rhs[i].(type) {
+ case *ast.Ident:
+ if lhs.Name != rhs.Name {
+ return false
+ }
+ case *ast.TypeAssertExpr:
+ if id, ok := rhs.X.(*ast.Ident); ok {
+ if lhs.Name != id.Name {
+ return false
+ }
+ }
+ }
+ }
+ return true
+}
+
+// idiomaticRedecl reports whether this declaration spec can be ignored for
+// the purposes of shadowing, that is, that any redeclarations it contains are deliberate.
+func (f *File) idiomaticRedecl(d *ast.ValueSpec) bool {
+ // Don't complain about deliberate redeclarations of the form
+ // var i, j = i, j
+ if len(d.Names) != len(d.Values) {
+ return false
+ }
+ for i, lhs := range d.Names {
+ if rhs, ok := d.Values[i].(*ast.Ident); ok {
+ if lhs.Name != rhs.Name {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// checkShadowDecl checks for shadowing in a general variable declaration.
+func checkShadowDecl(f *File, d *ast.GenDecl) {
+ if d.Tok != token.VAR {
+ return
+ }
+ for _, spec := range d.Specs {
+ valueSpec, ok := spec.(*ast.ValueSpec)
+ if !ok {
+ f.Badf(spec.Pos(), "invalid AST: var GenDecl not ValueSpec")
+ return
+ }
+ // Don't complain about deliberate redeclarations of the form
+ // var i = i
+ if f.idiomaticRedecl(valueSpec) {
+ return
+ }
+ for _, ident := range valueSpec.Names {
+ checkShadowing(f, ident)
+ }
+ }
+}
+
+// checkShadowing checks whether the identifier shadows an identifier in an outer scope.
+func checkShadowing(f *File, ident *ast.Ident) {
+ if ident.Name == "_" {
+ // Can't shadow the blank identifier.
+ return
+ }
+ obj := f.pkg.defs[ident]
+ if obj == nil {
+ return
+ }
+ // obj.Parent.Parent is the surrounding scope. If we can find another declaration
+ // starting from there, we have a shadowed identifier.
+ _, shadowed := obj.Parent().Parent().LookupParent(obj.Name())
+ if shadowed == nil {
+ return
+ }
+ // Don't complain if it's shadowing a universe-declared identifier; that's fine.
+ if shadowed.Parent() == types.Universe {
+ return
+ }
+ if *strictShadowing {
+ // The shadowed identifier must appear before this one to be an instance of shadowing.
+ if shadowed.Pos() > ident.Pos() {
+ return
+ }
+ } else {
+ // Don't complain if the span of validity of the shadowed identifier doesn't include
+ // the shadowing identifier.
+ span, ok := f.pkg.spans[shadowed]
+ if !ok {
+ f.Badf(ident.Pos(), "internal error: no range for %s", ident.Name)
+ return
+ }
+ if !span.contains(ident.Pos()) {
+ return
+ }
+ }
+ // Don't complain if the types differ: that implies the programmer really wants two different things.
+ if types.Identical(obj.Type(), shadowed.Type()) {
+ f.Badf(ident.Pos(), "declaration of %s shadows declaration at %s", obj.Name(), f.loc(shadowed.Pos()))
+ }
+}
diff --git a/llgo/third_party/go.tools/cmd/vet/shift.go b/llgo/third_party/go.tools/cmd/vet/shift.go
new file mode 100644
index 0000000000000000000000000000000000000000..c6c07e0d937481cc49cf5a5487bdad08ff97bb98
--- /dev/null
+++ b/llgo/third_party/go.tools/cmd/vet/shift.go
@@ -0,0 +1,83 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+This file contains the code to check for suspicious shifts.
+*/
+
+package main
+
+import (
+ "go/ast"
+ "go/token"
+
+ "llvm.org/llgo/third_party/go.tools/go/exact"
+ "llvm.org/llgo/third_party/go.tools/go/types"
+)
+
+func init() {
+ register("shift",
+ "check for useless shifts",
+ checkShift,
+ binaryExpr, assignStmt)
+}
+
+func checkShift(f *File, node ast.Node) {
+ switch node := node.(type) {
+ case *ast.BinaryExpr:
+ if node.Op == token.SHL || node.Op == token.SHR {
+ checkLongShift(f, node, node.X, node.Y)
+ }
+ case *ast.AssignStmt:
+ if len(node.Lhs) != 1 || len(node.Rhs) != 1 {
+ return
+ }
+ if node.Tok == token.SHL_ASSIGN || node.Tok == token.SHR_ASSIGN {
+ checkLongShift(f, node, node.Lhs[0], node.Rhs[0])
+ }
+ }
+}
+
+// checkLongShift checks whether a shift or shift-assign operation shifts by more
+// than the bit width of the underlying variable's type.
+func checkLongShift(f *File, node ast.Node, x, y ast.Expr) {
+ v := f.pkg.types[y].Value
+ if v == nil {
+ return
+ }
+ amt, ok := exact.Int64Val(v)
+ if !ok {
+ return
+ }
+ t := f.pkg.types[x].Type
+ if t == nil {
+ return
+ }
+ b, ok := t.Underlying().(*types.Basic)
+ if !ok {
+ return
+ }
+ var size int64
+ var msg string
+ switch b.Kind() {
+ case types.Uint8, types.Int8:
+ size = 8
+ case types.Uint16, types.Int16:
+ size = 16
+ case types.Uint32, types.Int32:
+ size = 32
+ case types.Uint64, types.Int64:
+ size = 64
+ case types.Int, types.Uint, types.Uintptr:
+ // These types may be as small as 32 bits, but no smaller.
+ size = 32
+ msg = "might be "
+ default:
+ return
+ }
+ if amt >= size {
+ ident := f.gofmt(x)
+ f.Badf(node.Pos(), "%s %stoo small for shift of %d", ident, msg, amt)
+ }
+}
diff --git a/llgo/third_party/go.tools/cmd/vet/structtag.go b/llgo/third_party/go.tools/cmd/vet/structtag.go
new file mode 100644
index 0000000000000000000000000000000000000000..5da390462c2effae4b17084b5c366fbaf025d82a
--- /dev/null
+++ b/llgo/third_party/go.tools/cmd/vet/structtag.go
@@ -0,0 +1,62 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains the test for canonical struct tags.
+
+package main
+
+import (
+ "go/ast"
+ "reflect"
+ "strconv"
+)
+
+func init() {
+ register("structtags",
+ "check that struct field tags have canonical format and apply to exported fields as needed",
+ checkCanonicalFieldTag,
+ field)
+}
+
+// checkCanonicalFieldTag checks a struct field tag.
+func checkCanonicalFieldTag(f *File, node ast.Node) {
+ field := node.(*ast.Field)
+ if field.Tag == nil {
+ return
+ }
+
+ tag, err := strconv.Unquote(field.Tag.Value)
+ if err != nil {
+ f.Badf(field.Pos(), "unable to read struct tag %s", field.Tag.Value)
+ return
+ }
+
+ // Check tag for validity by appending
+ // new key:value to end and checking that
+ // the tag parsing code can find it.
+ st := reflect.StructTag(tag + ` _gofix:"_magic"`)
+ if st.Get("_gofix") != "_magic" {
+ f.Badf(field.Pos(), "struct field tag %s not compatible with reflect.StructTag.Get", field.Tag.Value)
+ return
+ }
+
+ // Check for use of json or xml tags with unexported fields.
+
+ // Embedded struct. Nothing to do for now, but that
+ // may change, depending on what happens with issue 7363.
+ if len(field.Names) == 0 {
+ return
+ }
+
+ if field.Names[0].IsExported() {
+ return
+ }
+
+ for _, enc := range [...]string{"json", "xml"} {
+ if st.Get(enc) != "" {
+ f.Badf(field.Pos(), "struct field %s has %s tag but is not exported", field.Names[0].Name, enc)
+ return
+ }
+ }
+}
diff --git a/llgo/third_party/go.tools/cmd/vet/testdata/asm.go b/llgo/third_party/go.tools/cmd/vet/testdata/asm.go
new file mode 100644
index 0000000000000000000000000000000000000000..9a3d5315ad56c05697981c9425d6515e7609864f
--- /dev/null
+++ b/llgo/third_party/go.tools/cmd/vet/testdata/asm.go
@@ -0,0 +1,33 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+// This file contains declarations to test the assembly in test_asm.s.
+
+package testdata
+
+func arg1(x int8, y uint8)
+func arg2(x int16, y uint16)
+func arg4(x int32, y uint32)
+func arg8(x int64, y uint64)
+func argint(x int, y uint)
+func argptr(x *byte, y *byte, c chan int, m map[int]int, f func())
+func argstring(x, y string)
+func argslice(x, y []string)
+func argiface(x interface{}, y interface {
+ m()
+})
+func returnint() int
+func returnbyte(x int) byte
+func returnnamed(x byte) (r1 int, r2 int16, r3 string, r4 byte)
+func returnintmissing() int
+func leaf(x, y int) int
+
+func noprof(x int)
+func dupok(x int)
+func nosplit(x int)
+func rodata(x int)
+func noptr(x int)
+func wrapper(x int)
diff --git a/llgo/third_party/go.tools/cmd/vet/testdata/asm1.s b/llgo/third_party/go.tools/cmd/vet/testdata/asm1.s
new file mode 100644
index 0000000000000000000000000000000000000000..62f423cd8ba205e320bdb8a53b21ade781163d50
--- /dev/null
+++ b/llgo/third_party/go.tools/cmd/vet/testdata/asm1.s
@@ -0,0 +1,254 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build amd64
+// +build vet_test
+
+TEXT ·arg1(SB),0,$0-2
+ MOVB x+0(FP), AX
+ // MOVB x+0(FP), AX // commented out instructions used to panic
+ MOVB y+1(FP), BX
+ MOVW x+0(FP), AX // ERROR "\[amd64\] arg1: invalid MOVW of x\+0\(FP\); int8 is 1-byte value"
+ MOVW y+1(FP), AX // ERROR "invalid MOVW of y\+1\(FP\); uint8 is 1-byte value"
+ MOVL x+0(FP), AX // ERROR "invalid MOVL of x\+0\(FP\); int8 is 1-byte value"
+ MOVL y+1(FP), AX // ERROR "invalid MOVL of y\+1\(FP\); uint8 is 1-byte value"
+ MOVQ x+0(FP), AX // ERROR "invalid MOVQ of x\+0\(FP\); int8 is 1-byte value"
+ MOVQ y+1(FP), AX // ERROR "invalid MOVQ of y\+1\(FP\); uint8 is 1-byte value"
+ MOVB x+1(FP), AX // ERROR "invalid offset x\+1\(FP\); expected x\+0\(FP\)"
+ MOVB y+2(FP), AX // ERROR "invalid offset y\+2\(FP\); expected y\+1\(FP\)"
+ TESTB x+0(FP), AX
+ TESTB y+1(FP), BX
+ TESTW x+0(FP), AX // ERROR "invalid TESTW of x\+0\(FP\); int8 is 1-byte value"
+ TESTW y+1(FP), AX // ERROR "invalid TESTW of y\+1\(FP\); uint8 is 1-byte value"
+ TESTL x+0(FP), AX // ERROR "invalid TESTL of x\+0\(FP\); int8 is 1-byte value"
+ TESTL y+1(FP), AX // ERROR "invalid TESTL of y\+1\(FP\); uint8 is 1-byte value"
+ TESTQ x+0(FP), AX // ERROR "invalid TESTQ of x\+0\(FP\); int8 is 1-byte value"
+ TESTQ y+1(FP), AX // ERROR "invalid TESTQ of y\+1\(FP\); uint8 is 1-byte value"
+ TESTB x+1(FP), AX // ERROR "invalid offset x\+1\(FP\); expected x\+0\(FP\)"
+ TESTB y+2(FP), AX // ERROR "invalid offset y\+2\(FP\); expected y\+1\(FP\)"
+ MOVB 8(SP), AX // ERROR "8\(SP\) should be x\+0\(FP\)"
+ MOVB 9(SP), AX // ERROR "9\(SP\) should be y\+1\(FP\)"
+ MOVB 10(SP), AX // ERROR "use of 10\(SP\) points beyond argument frame"
+ RET
+
+TEXT ·arg2(SB),0,$0-4
+ MOVB x+0(FP), AX // ERROR "arg2: invalid MOVB of x\+0\(FP\); int16 is 2-byte value"
+ MOVB y+2(FP), AX // ERROR "invalid MOVB of y\+2\(FP\); uint16 is 2-byte value"
+ MOVW x+0(FP), AX
+ MOVW y+2(FP), BX
+ MOVL x+0(FP), AX // ERROR "invalid MOVL of x\+0\(FP\); int16 is 2-byte value"
+ MOVL y+2(FP), AX // ERROR "invalid MOVL of y\+2\(FP\); uint16 is 2-byte value"
+ MOVQ x+0(FP), AX // ERROR "invalid MOVQ of x\+0\(FP\); int16 is 2-byte value"
+ MOVQ y+2(FP), AX // ERROR "invalid MOVQ of y\+2\(FP\); uint16 is 2-byte value"
+ MOVW x+2(FP), AX // ERROR "invalid offset x\+2\(FP\); expected x\+0\(FP\)"
+ MOVW y+0(FP), AX // ERROR "invalid offset y\+0\(FP\); expected y\+2\(FP\)"
+ TESTB x+0(FP), AX // ERROR "invalid TESTB of x\+0\(FP\); int16 is 2-byte value"
+ TESTB y+2(FP), AX // ERROR "invalid TESTB of y\+2\(FP\); uint16 is 2-byte value"
+ TESTW x+0(FP), AX
+ TESTW y+2(FP), BX
+ TESTL x+0(FP), AX // ERROR "invalid TESTL of x\+0\(FP\); int16 is 2-byte value"
+ TESTL y+2(FP), AX // ERROR "invalid TESTL of y\+2\(FP\); uint16 is 2-byte value"
+ TESTQ x+0(FP), AX // ERROR "invalid TESTQ of x\+0\(FP\); int16 is 2-byte value"
+ TESTQ y+2(FP), AX // ERROR "invalid TESTQ of y\+2\(FP\); uint16 is 2-byte value"
+ TESTW x+2(FP), AX // ERROR "invalid offset x\+2\(FP\); expected x\+0\(FP\)"
+ TESTW y+0(FP), AX // ERROR "invalid offset y\+0\(FP\); expected y\+2\(FP\)"
+ RET
+
+TEXT ·arg4(SB),0,$0-2 // ERROR "arg4: wrong argument size 2; expected \$\.\.\.-8"
+ MOVB x+0(FP), AX // ERROR "invalid MOVB of x\+0\(FP\); int32 is 4-byte value"
+ MOVB y+4(FP), BX // ERROR "invalid MOVB of y\+4\(FP\); uint32 is 4-byte value"
+ MOVW x+0(FP), AX // ERROR "invalid MOVW of x\+0\(FP\); int32 is 4-byte value"
+ MOVW y+4(FP), AX // ERROR "invalid MOVW of y\+4\(FP\); uint32 is 4-byte value"
+ MOVL x+0(FP), AX
+ MOVL y+4(FP), AX
+ MOVQ x+0(FP), AX // ERROR "invalid MOVQ of x\+0\(FP\); int32 is 4-byte value"
+ MOVQ y+4(FP), AX // ERROR "invalid MOVQ of y\+4\(FP\); uint32 is 4-byte value"
+ MOVL x+4(FP), AX // ERROR "invalid offset x\+4\(FP\); expected x\+0\(FP\)"
+ MOVL y+2(FP), AX // ERROR "invalid offset y\+2\(FP\); expected y\+4\(FP\)"
+ TESTB x+0(FP), AX // ERROR "invalid TESTB of x\+0\(FP\); int32 is 4-byte value"
+ TESTB y+4(FP), BX // ERROR "invalid TESTB of y\+4\(FP\); uint32 is 4-byte value"
+ TESTW x+0(FP), AX // ERROR "invalid TESTW of x\+0\(FP\); int32 is 4-byte value"
+ TESTW y+4(FP), AX // ERROR "invalid TESTW of y\+4\(FP\); uint32 is 4-byte value"
+ TESTL x+0(FP), AX
+ TESTL y+4(FP), AX
+ TESTQ x+0(FP), AX // ERROR "invalid TESTQ of x\+0\(FP\); int32 is 4-byte value"
+ TESTQ y+4(FP), AX // ERROR "invalid TESTQ of y\+4\(FP\); uint32 is 4-byte value"
+ TESTL x+4(FP), AX // ERROR "invalid offset x\+4\(FP\); expected x\+0\(FP\)"
+ TESTL y+2(FP), AX // ERROR "invalid offset y\+2\(FP\); expected y\+4\(FP\)"
+ RET
+
+TEXT ·arg8(SB),7,$0-2 // ERROR "wrong argument size 2; expected \$\.\.\.-16"
+ MOVB x+0(FP), AX // ERROR "invalid MOVB of x\+0\(FP\); int64 is 8-byte value"
+ MOVB y+8(FP), BX // ERROR "invalid MOVB of y\+8\(FP\); uint64 is 8-byte value"
+ MOVW x+0(FP), AX // ERROR "invalid MOVW of x\+0\(FP\); int64 is 8-byte value"
+ MOVW y+8(FP), AX // ERROR "invalid MOVW of y\+8\(FP\); uint64 is 8-byte value"
+ MOVL x+0(FP), AX // ERROR "invalid MOVL of x\+0\(FP\); int64 is 8-byte value"
+ MOVL y+8(FP), AX // ERROR "invalid MOVL of y\+8\(FP\); uint64 is 8-byte value"
+ MOVQ x+0(FP), AX
+ MOVQ y+8(FP), AX
+ MOVQ x+8(FP), AX // ERROR "invalid offset x\+8\(FP\); expected x\+0\(FP\)"
+ MOVQ y+2(FP), AX // ERROR "invalid offset y\+2\(FP\); expected y\+8\(FP\)"
+ TESTB x+0(FP), AX // ERROR "invalid TESTB of x\+0\(FP\); int64 is 8-byte value"
+ TESTB y+8(FP), BX // ERROR "invalid TESTB of y\+8\(FP\); uint64 is 8-byte value"
+ TESTW x+0(FP), AX // ERROR "invalid TESTW of x\+0\(FP\); int64 is 8-byte value"
+ TESTW y+8(FP), AX // ERROR "invalid TESTW of y\+8\(FP\); uint64 is 8-byte value"
+ TESTL x+0(FP), AX // ERROR "invalid TESTL of x\+0\(FP\); int64 is 8-byte value"
+ TESTL y+8(FP), AX // ERROR "invalid TESTL of y\+8\(FP\); uint64 is 8-byte value"
+ TESTQ x+0(FP), AX
+ TESTQ y+8(FP), AX
+ TESTQ x+8(FP), AX // ERROR "invalid offset x\+8\(FP\); expected x\+0\(FP\)"
+ TESTQ y+2(FP), AX // ERROR "invalid offset y\+2\(FP\); expected y\+8\(FP\)"
+ RET
+
+TEXT ·argint(SB),0,$0-2 // ERROR "wrong argument size 2; expected \$\.\.\.-16"
+ MOVB x+0(FP), AX // ERROR "invalid MOVB of x\+0\(FP\); int is 8-byte value"
+ MOVB y+8(FP), BX // ERROR "invalid MOVB of y\+8\(FP\); uint is 8-byte value"
+ MOVW x+0(FP), AX // ERROR "invalid MOVW of x\+0\(FP\); int is 8-byte value"
+ MOVW y+8(FP), AX // ERROR "invalid MOVW of y\+8\(FP\); uint is 8-byte value"
+ MOVL x+0(FP), AX // ERROR "invalid MOVL of x\+0\(FP\); int is 8-byte value"
+ MOVL y+8(FP), AX // ERROR "invalid MOVL of y\+8\(FP\); uint is 8-byte value"
+ MOVQ x+0(FP), AX
+ MOVQ y+8(FP), AX
+ MOVQ x+8(FP), AX // ERROR "invalid offset x\+8\(FP\); expected x\+0\(FP\)"
+ MOVQ y+2(FP), AX // ERROR "invalid offset y\+2\(FP\); expected y\+8\(FP\)"
+ TESTB x+0(FP), AX // ERROR "invalid TESTB of x\+0\(FP\); int is 8-byte value"
+ TESTB y+8(FP), BX // ERROR "invalid TESTB of y\+8\(FP\); uint is 8-byte value"
+ TESTW x+0(FP), AX // ERROR "invalid TESTW of x\+0\(FP\); int is 8-byte value"
+ TESTW y+8(FP), AX // ERROR "invalid TESTW of y\+8\(FP\); uint is 8-byte value"
+ TESTL x+0(FP), AX // ERROR "invalid TESTL of x\+0\(FP\); int is 8-byte value"
+ TESTL y+8(FP), AX // ERROR "invalid TESTL of y\+8\(FP\); uint is 8-byte value"
+ TESTQ x+0(FP), AX
+ TESTQ y+8(FP), AX
+ TESTQ x+8(FP), AX // ERROR "invalid offset x\+8\(FP\); expected x\+0\(FP\)"
+ TESTQ y+2(FP), AX // ERROR "invalid offset y\+2\(FP\); expected y\+8\(FP\)"
+ RET
+
+TEXT ·argptr(SB),7,$0-2 // ERROR "wrong argument size 2; expected \$\.\.\.-40"
+ MOVB x+0(FP), AX // ERROR "invalid MOVB of x\+0\(FP\); \*byte is 8-byte value"
+ MOVB y+8(FP), BX // ERROR "invalid MOVB of y\+8\(FP\); \*byte is 8-byte value"
+ MOVW x+0(FP), AX // ERROR "invalid MOVW of x\+0\(FP\); \*byte is 8-byte value"
+ MOVW y+8(FP), AX // ERROR "invalid MOVW of y\+8\(FP\); \*byte is 8-byte value"
+ MOVL x+0(FP), AX // ERROR "invalid MOVL of x\+0\(FP\); \*byte is 8-byte value"
+ MOVL y+8(FP), AX // ERROR "invalid MOVL of y\+8\(FP\); \*byte is 8-byte value"
+ MOVQ x+0(FP), AX
+ MOVQ y+8(FP), AX
+ MOVQ x+8(FP), AX // ERROR "invalid offset x\+8\(FP\); expected x\+0\(FP\)"
+ MOVQ y+2(FP), AX // ERROR "invalid offset y\+2\(FP\); expected y\+8\(FP\)"
+ TESTB x+0(FP), AX // ERROR "invalid TESTB of x\+0\(FP\); \*byte is 8-byte value"
+ TESTB y+8(FP), BX // ERROR "invalid TESTB of y\+8\(FP\); \*byte is 8-byte value"
+ TESTW x+0(FP), AX // ERROR "invalid TESTW of x\+0\(FP\); \*byte is 8-byte value"
+ TESTW y+8(FP), AX // ERROR "invalid TESTW of y\+8\(FP\); \*byte is 8-byte value"
+ TESTL x+0(FP), AX // ERROR "invalid TESTL of x\+0\(FP\); \*byte is 8-byte value"
+ TESTL y+8(FP), AX // ERROR "invalid TESTL of y\+8\(FP\); \*byte is 8-byte value"
+ TESTQ x+0(FP), AX
+ TESTQ y+8(FP), AX
+ TESTQ x+8(FP), AX // ERROR "invalid offset x\+8\(FP\); expected x\+0\(FP\)"
+ TESTQ y+2(FP), AX // ERROR "invalid offset y\+2\(FP\); expected y\+8\(FP\)"
+ MOVL c+16(FP), AX // ERROR "invalid MOVL of c\+16\(FP\); chan int is 8-byte value"
+ MOVL m+24(FP), AX // ERROR "invalid MOVL of m\+24\(FP\); map\[int\]int is 8-byte value"
+ MOVL f+32(FP), AX // ERROR "invalid MOVL of f\+32\(FP\); func\(\) is 8-byte value"
+ RET
+
+TEXT ·argstring(SB),0,$32 // ERROR "wrong argument size 0; expected \$\.\.\.-32"
+ MOVW x+0(FP), AX // ERROR "invalid MOVW of x\+0\(FP\); string base is 8-byte value"
+ MOVL x+0(FP), AX // ERROR "invalid MOVL of x\+0\(FP\); string base is 8-byte value"
+ MOVQ x+0(FP), AX
+ MOVW x_base+0(FP), AX // ERROR "invalid MOVW of x_base\+0\(FP\); string base is 8-byte value"
+ MOVL x_base+0(FP), AX // ERROR "invalid MOVL of x_base\+0\(FP\); string base is 8-byte value"
+ MOVQ x_base+0(FP), AX
+ MOVW x_len+0(FP), AX // ERROR "invalid offset x_len\+0\(FP\); expected x_len\+8\(FP\)"
+ MOVL x_len+0(FP), AX // ERROR "invalid offset x_len\+0\(FP\); expected x_len\+8\(FP\)"
+ MOVQ x_len+0(FP), AX // ERROR "invalid offset x_len\+0\(FP\); expected x_len\+8\(FP\)"
+ MOVW x_len+8(FP), AX // ERROR "invalid MOVW of x_len\+8\(FP\); string len is 8-byte value"
+ MOVL x_len+8(FP), AX // ERROR "invalid MOVL of x_len\+8\(FP\); string len is 8-byte value"
+ MOVQ x_len+8(FP), AX
+ MOVQ y+0(FP), AX // ERROR "invalid offset y\+0\(FP\); expected y\+16\(FP\)"
+ MOVQ y_len+8(FP), AX // ERROR "invalid offset y_len\+8\(FP\); expected y_len\+24\(FP\)"
+ RET
+
+TEXT ·argslice(SB),0,$48 // ERROR "wrong argument size 0; expected \$\.\.\.-48"
+ MOVW x+0(FP), AX // ERROR "invalid MOVW of x\+0\(FP\); slice base is 8-byte value"
+ MOVL x+0(FP), AX // ERROR "invalid MOVL of x\+0\(FP\); slice base is 8-byte value"
+ MOVQ x+0(FP), AX
+ MOVW x_base+0(FP), AX // ERROR "invalid MOVW of x_base\+0\(FP\); slice base is 8-byte value"
+ MOVL x_base+0(FP), AX // ERROR "invalid MOVL of x_base\+0\(FP\); slice base is 8-byte value"
+ MOVQ x_base+0(FP), AX
+ MOVW x_len+0(FP), AX // ERROR "invalid offset x_len\+0\(FP\); expected x_len\+8\(FP\)"
+ MOVL x_len+0(FP), AX // ERROR "invalid offset x_len\+0\(FP\); expected x_len\+8\(FP\)"
+ MOVQ x_len+0(FP), AX // ERROR "invalid offset x_len\+0\(FP\); expected x_len\+8\(FP\)"
+ MOVW x_len+8(FP), AX // ERROR "invalid MOVW of x_len\+8\(FP\); slice len is 8-byte value"
+ MOVL x_len+8(FP), AX // ERROR "invalid MOVL of x_len\+8\(FP\); slice len is 8-byte value"
+ MOVQ x_len+8(FP), AX
+ MOVW x_cap+0(FP), AX // ERROR "invalid offset x_cap\+0\(FP\); expected x_cap\+16\(FP\)"
+ MOVL x_cap+0(FP), AX // ERROR "invalid offset x_cap\+0\(FP\); expected x_cap\+16\(FP\)"
+ MOVQ x_cap+0(FP), AX // ERROR "invalid offset x_cap\+0\(FP\); expected x_cap\+16\(FP\)"
+ MOVW x_cap+16(FP), AX // ERROR "invalid MOVW of x_cap\+16\(FP\); slice cap is 8-byte value"
+ MOVL x_cap+16(FP), AX // ERROR "invalid MOVL of x_cap\+16\(FP\); slice cap is 8-byte value"
+ MOVQ x_cap+16(FP), AX
+ MOVQ y+0(FP), AX // ERROR "invalid offset y\+0\(FP\); expected y\+24\(FP\)"
+ MOVQ y_len+8(FP), AX // ERROR "invalid offset y_len\+8\(FP\); expected y_len\+32\(FP\)"
+ MOVQ y_cap+16(FP), AX // ERROR "invalid offset y_cap\+16\(FP\); expected y_cap\+40\(FP\)"
+ RET
+
+TEXT ·argiface(SB),0,$0-32
+ MOVW x+0(FP), AX // ERROR "invalid MOVW of x\+0\(FP\); interface type is 8-byte value"
+ MOVL x+0(FP), AX // ERROR "invalid MOVL of x\+0\(FP\); interface type is 8-byte value"
+ MOVQ x+0(FP), AX
+ MOVW x_type+0(FP), AX // ERROR "invalid MOVW of x_type\+0\(FP\); interface type is 8-byte value"
+ MOVL x_type+0(FP), AX // ERROR "invalid MOVL of x_type\+0\(FP\); interface type is 8-byte value"
+ MOVQ x_type+0(FP), AX
+ MOVQ x_itable+0(FP), AX // ERROR "unknown variable x_itable; offset 0 is x_type\+0\(FP\)"
+ MOVQ x_itable+1(FP), AX // ERROR "unknown variable x_itable; offset 1 is x_type\+0\(FP\)"
+ MOVW x_data+0(FP), AX // ERROR "invalid offset x_data\+0\(FP\); expected x_data\+8\(FP\)"
+ MOVL x_data+0(FP), AX // ERROR "invalid offset x_data\+0\(FP\); expected x_data\+8\(FP\)"
+ MOVQ x_data+0(FP), AX // ERROR "invalid offset x_data\+0\(FP\); expected x_data\+8\(FP\)"
+ MOVW x_data+8(FP), AX // ERROR "invalid MOVW of x_data\+8\(FP\); interface data is 8-byte value"
+ MOVL x_data+8(FP), AX // ERROR "invalid MOVL of x_data\+8\(FP\); interface data is 8-byte value"
+ MOVQ x_data+8(FP), AX
+ MOVW y+16(FP), AX // ERROR "invalid MOVW of y\+16\(FP\); interface itable is 8-byte value"
+ MOVL y+16(FP), AX // ERROR "invalid MOVL of y\+16\(FP\); interface itable is 8-byte value"
+ MOVQ y+16(FP), AX
+ MOVW y_itable+16(FP), AX // ERROR "invalid MOVW of y_itable\+16\(FP\); interface itable is 8-byte value"
+ MOVL y_itable+16(FP), AX // ERROR "invalid MOVL of y_itable\+16\(FP\); interface itable is 8-byte value"
+ MOVQ y_itable+16(FP), AX
+ MOVQ y_type+16(FP), AX // ERROR "unknown variable y_type; offset 16 is y_itable\+16\(FP\)"
+ MOVW y_data+16(FP), AX // ERROR "invalid offset y_data\+16\(FP\); expected y_data\+24\(FP\)"
+ MOVL y_data+16(FP), AX // ERROR "invalid offset y_data\+16\(FP\); expected y_data\+24\(FP\)"
+ MOVQ y_data+16(FP), AX // ERROR "invalid offset y_data\+16\(FP\); expected y_data\+24\(FP\)"
+ MOVW y_data+24(FP), AX // ERROR "invalid MOVW of y_data\+24\(FP\); interface data is 8-byte value"
+ MOVL y_data+24(FP), AX // ERROR "invalid MOVL of y_data\+24\(FP\); interface data is 8-byte value"
+ MOVQ y_data+24(FP), AX
+ RET
+
+TEXT ·returnint(SB),0,$0-8
+ MOVB AX, ret+0(FP) // ERROR "invalid MOVB of ret\+0\(FP\); int is 8-byte value"
+ MOVW AX, ret+0(FP) // ERROR "invalid MOVW of ret\+0\(FP\); int is 8-byte value"
+ MOVL AX, ret+0(FP) // ERROR "invalid MOVL of ret\+0\(FP\); int is 8-byte value"
+ MOVQ AX, ret+0(FP)
+ MOVQ AX, ret+1(FP) // ERROR "invalid offset ret\+1\(FP\); expected ret\+0\(FP\)"
+ MOVQ AX, r+0(FP) // ERROR "unknown variable r; offset 0 is ret\+0\(FP\)"
+ RET
+
+TEXT ·returnbyte(SB),0,$0-9
+ MOVQ x+0(FP), AX
+ MOVB AX, ret+8(FP)
+ MOVW AX, ret+8(FP) // ERROR "invalid MOVW of ret\+8\(FP\); byte is 1-byte value"
+ MOVL AX, ret+8(FP) // ERROR "invalid MOVL of ret\+8\(FP\); byte is 1-byte value"
+ MOVQ AX, ret+8(FP) // ERROR "invalid MOVQ of ret\+8\(FP\); byte is 1-byte value"
+ MOVB AX, ret+7(FP) // ERROR "invalid offset ret\+7\(FP\); expected ret\+8\(FP\)"
+ RET
+
+TEXT ·returnnamed(SB),0,$0-41
+ MOVB x+0(FP), AX
+ MOVQ AX, r1+8(FP)
+ MOVW AX, r2+16(FP)
+ MOVQ AX, r3+24(FP)
+ MOVQ AX, r3_base+24(FP)
+ MOVQ AX, r3_len+32(FP)
+ MOVB AX, r4+40(FP)
+ MOVL AX, r1+8(FP) // ERROR "invalid MOVL of r1\+8\(FP\); int is 8-byte value"
+ RET
+
+TEXT ·returnintmissing(SB),0,$0-8
+ RET // ERROR "RET without writing to 8-byte ret\+0\(FP\)"
diff --git a/llgo/third_party/go.tools/cmd/vet/testdata/asm2.s b/llgo/third_party/go.tools/cmd/vet/testdata/asm2.s
new file mode 100644
index 0000000000000000000000000000000000000000..c33c02a70b27e34ad5205501eb0bd935750be51d
--- /dev/null
+++ b/llgo/third_party/go.tools/cmd/vet/testdata/asm2.s
@@ -0,0 +1,257 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build 386
+// +build vet_test
+
+TEXT ·arg1(SB),0,$0-2
+ MOVB x+0(FP), AX
+ MOVB y+1(FP), BX
+ MOVW x+0(FP), AX // ERROR "\[386\] arg1: invalid MOVW of x\+0\(FP\); int8 is 1-byte value"
+ MOVW y+1(FP), AX // ERROR "invalid MOVW of y\+1\(FP\); uint8 is 1-byte value"
+ MOVL x+0(FP), AX // ERROR "invalid MOVL of x\+0\(FP\); int8 is 1-byte value"
+ MOVL y+1(FP), AX // ERROR "invalid MOVL of y\+1\(FP\); uint8 is 1-byte value"
+ MOVQ x+0(FP), AX // ERROR "invalid MOVQ of x\+0\(FP\); int8 is 1-byte value"
+ MOVQ y+1(FP), AX // ERROR "invalid MOVQ of y\+1\(FP\); uint8 is 1-byte value"
+ MOVB x+1(FP), AX // ERROR "invalid offset x\+1\(FP\); expected x\+0\(FP\)"
+ MOVB y+2(FP), AX // ERROR "invalid offset y\+2\(FP\); expected y\+1\(FP\)"
+ TESTB x+0(FP), AX
+ TESTB y+1(FP), BX
+ TESTW x+0(FP), AX // ERROR "invalid TESTW of x\+0\(FP\); int8 is 1-byte value"
+ TESTW y+1(FP), AX // ERROR "invalid TESTW of y\+1\(FP\); uint8 is 1-byte value"
+ TESTL x+0(FP), AX // ERROR "invalid TESTL of x\+0\(FP\); int8 is 1-byte value"
+ TESTL y+1(FP), AX // ERROR "invalid TESTL of y\+1\(FP\); uint8 is 1-byte value"
+ TESTQ x+0(FP), AX // ERROR "invalid TESTQ of x\+0\(FP\); int8 is 1-byte value"
+ TESTQ y+1(FP), AX // ERROR "invalid TESTQ of y\+1\(FP\); uint8 is 1-byte value"
+ TESTB x+1(FP), AX // ERROR "invalid offset x\+1\(FP\); expected x\+0\(FP\)"
+ TESTB y+2(FP), AX // ERROR "invalid offset y\+2\(FP\); expected y\+1\(FP\)"
+ MOVB 4(SP), AX // ERROR "4\(SP\) should be x\+0\(FP\)"
+ MOVB 5(SP), AX // ERROR "5\(SP\) should be y\+1\(FP\)"
+ MOVB 6(SP), AX // ERROR "use of 6\(SP\) points beyond argument frame"
+ RET
+
+TEXT ·arg2(SB),0,$0-4
+ MOVB x+0(FP), AX // ERROR "arg2: invalid MOVB of x\+0\(FP\); int16 is 2-byte value"
+ MOVB y+2(FP), AX // ERROR "invalid MOVB of y\+2\(FP\); uint16 is 2-byte value"
+ MOVW x+0(FP), AX
+ MOVW y+2(FP), BX
+ MOVL x+0(FP), AX // ERROR "invalid MOVL of x\+0\(FP\); int16 is 2-byte value"
+ MOVL y+2(FP), AX // ERROR "invalid MOVL of y\+2\(FP\); uint16 is 2-byte value"
+ MOVQ x+0(FP), AX // ERROR "invalid MOVQ of x\+0\(FP\); int16 is 2-byte value"
+ MOVQ y+2(FP), AX // ERROR "invalid MOVQ of y\+2\(FP\); uint16 is 2-byte value"
+ MOVW x+2(FP), AX // ERROR "invalid offset x\+2\(FP\); expected x\+0\(FP\)"
+ MOVW y+0(FP), AX // ERROR "invalid offset y\+0\(FP\); expected y\+2\(FP\)"
+ TESTB x+0(FP), AX // ERROR "invalid TESTB of x\+0\(FP\); int16 is 2-byte value"
+ TESTB y+2(FP), AX // ERROR "invalid TESTB of y\+2\(FP\); uint16 is 2-byte value"
+ TESTW x+0(FP), AX
+ TESTW y+2(FP), BX
+ TESTL x+0(FP), AX // ERROR "invalid TESTL of x\+0\(FP\); int16 is 2-byte value"
+ TESTL y+2(FP), AX // ERROR "invalid TESTL of y\+2\(FP\); uint16 is 2-byte value"
+ TESTQ x+0(FP), AX // ERROR "invalid TESTQ of x\+0\(FP\); int16 is 2-byte value"
+ TESTQ y+2(FP), AX // ERROR "invalid TESTQ of y\+2\(FP\); uint16 is 2-byte value"
+ TESTW x+2(FP), AX // ERROR "invalid offset x\+2\(FP\); expected x\+0\(FP\)"
+ TESTW y+0(FP), AX // ERROR "invalid offset y\+0\(FP\); expected y\+2\(FP\)"
+ RET
+
+TEXT ·arg4(SB),0,$0-2 // ERROR "arg4: wrong argument size 2; expected \$\.\.\.-8"
+ MOVB x+0(FP), AX // ERROR "invalid MOVB of x\+0\(FP\); int32 is 4-byte value"
+ MOVB y+4(FP), BX // ERROR "invalid MOVB of y\+4\(FP\); uint32 is 4-byte value"
+ MOVW x+0(FP), AX // ERROR "invalid MOVW of x\+0\(FP\); int32 is 4-byte value"
+ MOVW y+4(FP), AX // ERROR "invalid MOVW of y\+4\(FP\); uint32 is 4-byte value"
+ MOVL x+0(FP), AX
+ MOVL y+4(FP), AX
+ MOVQ x+0(FP), AX // ERROR "invalid MOVQ of x\+0\(FP\); int32 is 4-byte value"
+ MOVQ y+4(FP), AX // ERROR "invalid MOVQ of y\+4\(FP\); uint32 is 4-byte value"
+ MOVL x+4(FP), AX // ERROR "invalid offset x\+4\(FP\); expected x\+0\(FP\)"
+ MOVL y+2(FP), AX // ERROR "invalid offset y\+2\(FP\); expected y\+4\(FP\)"
+ TESTB x+0(FP), AX // ERROR "invalid TESTB of x\+0\(FP\); int32 is 4-byte value"
+ TESTB y+4(FP), BX // ERROR "invalid TESTB of y\+4\(FP\); uint32 is 4-byte value"
+ TESTW x+0(FP), AX // ERROR "invalid TESTW of x\+0\(FP\); int32 is 4-byte value"
+ TESTW y+4(FP), AX // ERROR "invalid TESTW of y\+4\(FP\); uint32 is 4-byte value"
+ TESTL x+0(FP), AX
+ TESTL y+4(FP), AX
+ TESTQ x+0(FP), AX // ERROR "invalid TESTQ of x\+0\(FP\); int32 is 4-byte value"
+ TESTQ y+4(FP), AX // ERROR "invalid TESTQ of y\+4\(FP\); uint32 is 4-byte value"
+ TESTL x+4(FP), AX // ERROR "invalid offset x\+4\(FP\); expected x\+0\(FP\)"
+ TESTL y+2(FP), AX // ERROR "invalid offset y\+2\(FP\); expected y\+4\(FP\)"
+ RET
+
+TEXT ·arg8(SB),7,$0-2 // ERROR "wrong argument size 2; expected \$\.\.\.-16"
+ MOVB x+0(FP), AX // ERROR "invalid MOVB of x\+0\(FP\); int64 is 8-byte value"
+ MOVB y+8(FP), BX // ERROR "invalid MOVB of y\+8\(FP\); uint64 is 8-byte value"
+ MOVW x+0(FP), AX // ERROR "invalid MOVW of x\+0\(FP\); int64 is 8-byte value"
+ MOVW y+8(FP), AX // ERROR "invalid MOVW of y\+8\(FP\); uint64 is 8-byte value"
+ MOVL x+0(FP), AX // ERROR "invalid MOVL of x\+0\(FP\); int64 is 8-byte value containing x_lo\+0\(FP\) and x_hi\+4\(FP\)"
+ MOVL x_lo+0(FP), AX
+ MOVL x_hi+4(FP), AX
+ MOVL y+8(FP), AX // ERROR "invalid MOVL of y\+8\(FP\); uint64 is 8-byte value containing y_lo\+8\(FP\) and y_hi\+12\(FP\)"
+ MOVL y_lo+8(FP), AX
+ MOVL y_hi+12(FP), AX
+ MOVQ x+0(FP), AX
+ MOVQ y+8(FP), AX
+ MOVQ x+8(FP), AX // ERROR "invalid offset x\+8\(FP\); expected x\+0\(FP\)"
+ MOVQ y+2(FP), AX // ERROR "invalid offset y\+2\(FP\); expected y\+8\(FP\)"
+ TESTB x+0(FP), AX // ERROR "invalid TESTB of x\+0\(FP\); int64 is 8-byte value"
+ TESTB y+8(FP), BX // ERROR "invalid TESTB of y\+8\(FP\); uint64 is 8-byte value"
+ TESTW x+0(FP), AX // ERROR "invalid TESTW of x\+0\(FP\); int64 is 8-byte value"
+ TESTW y+8(FP), AX // ERROR "invalid TESTW of y\+8\(FP\); uint64 is 8-byte value"
+ TESTL x+0(FP), AX // ERROR "invalid TESTL of x\+0\(FP\); int64 is 8-byte value containing x_lo\+0\(FP\) and x_hi\+4\(FP\)"
+ TESTL y+8(FP), AX // ERROR "invalid TESTL of y\+8\(FP\); uint64 is 8-byte value containing y_lo\+8\(FP\) and y_hi\+12\(FP\)"
+ TESTQ x+0(FP), AX
+ TESTQ y+8(FP), AX
+ TESTQ x+8(FP), AX // ERROR "invalid offset x\+8\(FP\); expected x\+0\(FP\)"
+ TESTQ y+2(FP), AX // ERROR "invalid offset y\+2\(FP\); expected y\+8\(FP\)"
+ RET
+
+TEXT ·argint(SB),0,$0-2 // ERROR "wrong argument size 2; expected \$\.\.\.-8"
+ MOVB x+0(FP), AX // ERROR "invalid MOVB of x\+0\(FP\); int is 4-byte value"
+ MOVB y+4(FP), BX // ERROR "invalid MOVB of y\+4\(FP\); uint is 4-byte value"
+ MOVW x+0(FP), AX // ERROR "invalid MOVW of x\+0\(FP\); int is 4-byte value"
+ MOVW y+4(FP), AX // ERROR "invalid MOVW of y\+4\(FP\); uint is 4-byte value"
+ MOVL x+0(FP), AX
+ MOVL y+4(FP), AX
+ MOVQ x+0(FP), AX // ERROR "invalid MOVQ of x\+0\(FP\); int is 4-byte value"
+ MOVQ y+4(FP), AX // ERROR "invalid MOVQ of y\+4\(FP\); uint is 4-byte value"
+ MOVQ x+4(FP), AX // ERROR "invalid offset x\+4\(FP\); expected x\+0\(FP\)"
+ MOVQ y+2(FP), AX // ERROR "invalid offset y\+2\(FP\); expected y\+4\(FP\)"
+ TESTB x+0(FP), AX // ERROR "invalid TESTB of x\+0\(FP\); int is 4-byte value"
+ TESTB y+4(FP), BX // ERROR "invalid TESTB of y\+4\(FP\); uint is 4-byte value"
+ TESTW x+0(FP), AX // ERROR "invalid TESTW of x\+0\(FP\); int is 4-byte value"
+ TESTW y+4(FP), AX // ERROR "invalid TESTW of y\+4\(FP\); uint is 4-byte value"
+ TESTL x+0(FP), AX
+ TESTL y+4(FP), AX
+ TESTQ x+0(FP), AX // ERROR "invalid TESTQ of x\+0\(FP\); int is 4-byte value"
+ TESTQ y+4(FP), AX // ERROR "invalid TESTQ of y\+4\(FP\); uint is 4-byte value"
+ TESTQ x+4(FP), AX // ERROR "invalid offset x\+4\(FP\); expected x\+0\(FP\)"
+ TESTQ y+2(FP), AX // ERROR "invalid offset y\+2\(FP\); expected y\+4\(FP\)"
+ RET
+
+TEXT ·argptr(SB),7,$0-2 // ERROR "wrong argument size 2; expected \$\.\.\.-20"
+ MOVB x+0(FP), AX // ERROR "invalid MOVB of x\+0\(FP\); \*byte is 4-byte value"
+ MOVB y+4(FP), BX // ERROR "invalid MOVB of y\+4\(FP\); \*byte is 4-byte value"
+ MOVW x+0(FP), AX // ERROR "invalid MOVW of x\+0\(FP\); \*byte is 4-byte value"
+ MOVW y+4(FP), AX // ERROR "invalid MOVW of y\+4\(FP\); \*byte is 4-byte value"
+ MOVL x+0(FP), AX
+ MOVL y+4(FP), AX
+ MOVQ x+0(FP), AX // ERROR "invalid MOVQ of x\+0\(FP\); \*byte is 4-byte value"
+ MOVQ y+4(FP), AX // ERROR "invalid MOVQ of y\+4\(FP\); \*byte is 4-byte value"
+ MOVQ x+4(FP), AX // ERROR "invalid offset x\+4\(FP\); expected x\+0\(FP\)"
+ MOVQ y+2(FP), AX // ERROR "invalid offset y\+2\(FP\); expected y\+4\(FP\)"
+ TESTB x+0(FP), AX // ERROR "invalid TESTB of x\+0\(FP\); \*byte is 4-byte value"
+ TESTB y+4(FP), BX // ERROR "invalid TESTB of y\+4\(FP\); \*byte is 4-byte value"
+ TESTW x+0(FP), AX // ERROR "invalid TESTW of x\+0\(FP\); \*byte is 4-byte value"
+ TESTW y+4(FP), AX // ERROR "invalid TESTW of y\+4\(FP\); \*byte is 4-byte value"
+ TESTL x+0(FP), AX
+ TESTL y+4(FP), AX
+ TESTQ x+0(FP), AX // ERROR "invalid TESTQ of x\+0\(FP\); \*byte is 4-byte value"
+ TESTQ y+4(FP), AX // ERROR "invalid TESTQ of y\+4\(FP\); \*byte is 4-byte value"
+ TESTQ x+4(FP), AX // ERROR "invalid offset x\+4\(FP\); expected x\+0\(FP\)"
+ TESTQ y+2(FP), AX // ERROR "invalid offset y\+2\(FP\); expected y\+4\(FP\)"
+ MOVW c+8(FP), AX // ERROR "invalid MOVW of c\+8\(FP\); chan int is 4-byte value"
+ MOVW m+12(FP), AX // ERROR "invalid MOVW of m\+12\(FP\); map\[int\]int is 4-byte value"
+ MOVW f+16(FP), AX // ERROR "invalid MOVW of f\+16\(FP\); func\(\) is 4-byte value"
+ RET
+
+TEXT ·argstring(SB),0,$16 // ERROR "wrong argument size 0; expected \$\.\.\.-16"
+ MOVW x+0(FP), AX // ERROR "invalid MOVW of x\+0\(FP\); string base is 4-byte value"
+ MOVL x+0(FP), AX
+ MOVQ x+0(FP), AX // ERROR "invalid MOVQ of x\+0\(FP\); string base is 4-byte value"
+ MOVW x_base+0(FP), AX // ERROR "invalid MOVW of x_base\+0\(FP\); string base is 4-byte value"
+ MOVL x_base+0(FP), AX
+ MOVQ x_base+0(FP), AX // ERROR "invalid MOVQ of x_base\+0\(FP\); string base is 4-byte value"
+ MOVW x_len+0(FP), AX // ERROR "invalid offset x_len\+0\(FP\); expected x_len\+4\(FP\)"
+ MOVL x_len+0(FP), AX // ERROR "invalid offset x_len\+0\(FP\); expected x_len\+4\(FP\)"
+ MOVQ x_len+0(FP), AX // ERROR "invalid offset x_len\+0\(FP\); expected x_len\+4\(FP\)"
+ MOVW x_len+4(FP), AX // ERROR "invalid MOVW of x_len\+4\(FP\); string len is 4-byte value"
+ MOVL x_len+4(FP), AX
+ MOVQ x_len+4(FP), AX // ERROR "invalid MOVQ of x_len\+4\(FP\); string len is 4-byte value"
+ MOVQ y+0(FP), AX // ERROR "invalid offset y\+0\(FP\); expected y\+8\(FP\)"
+ MOVQ y_len+4(FP), AX // ERROR "invalid offset y_len\+4\(FP\); expected y_len\+12\(FP\)"
+ RET
+
+TEXT ·argslice(SB),0,$24 // ERROR "wrong argument size 0; expected \$\.\.\.-24"
+ MOVW x+0(FP), AX // ERROR "invalid MOVW of x\+0\(FP\); slice base is 4-byte value"
+ MOVL x+0(FP), AX
+ MOVQ x+0(FP), AX // ERROR "invalid MOVQ of x\+0\(FP\); slice base is 4-byte value"
+ MOVW x_base+0(FP), AX // ERROR "invalid MOVW of x_base\+0\(FP\); slice base is 4-byte value"
+ MOVL x_base+0(FP), AX
+ MOVQ x_base+0(FP), AX // ERROR "invalid MOVQ of x_base\+0\(FP\); slice base is 4-byte value"
+ MOVW x_len+0(FP), AX // ERROR "invalid offset x_len\+0\(FP\); expected x_len\+4\(FP\)"
+ MOVL x_len+0(FP), AX // ERROR "invalid offset x_len\+0\(FP\); expected x_len\+4\(FP\)"
+ MOVQ x_len+0(FP), AX // ERROR "invalid offset x_len\+0\(FP\); expected x_len\+4\(FP\)"
+ MOVW x_len+4(FP), AX // ERROR "invalid MOVW of x_len\+4\(FP\); slice len is 4-byte value"
+ MOVL x_len+4(FP), AX
+ MOVQ x_len+4(FP), AX // ERROR "invalid MOVQ of x_len\+4\(FP\); slice len is 4-byte value"
+ MOVW x_cap+0(FP), AX // ERROR "invalid offset x_cap\+0\(FP\); expected x_cap\+8\(FP\)"
+ MOVL x_cap+0(FP), AX // ERROR "invalid offset x_cap\+0\(FP\); expected x_cap\+8\(FP\)"
+ MOVQ x_cap+0(FP), AX // ERROR "invalid offset x_cap\+0\(FP\); expected x_cap\+8\(FP\)"
+ MOVW x_cap+8(FP), AX // ERROR "invalid MOVW of x_cap\+8\(FP\); slice cap is 4-byte value"
+ MOVL x_cap+8(FP), AX
+ MOVQ x_cap+8(FP), AX // ERROR "invalid MOVQ of x_cap\+8\(FP\); slice cap is 4-byte value"
+ MOVQ y+0(FP), AX // ERROR "invalid offset y\+0\(FP\); expected y\+12\(FP\)"
+ MOVQ y_len+4(FP), AX // ERROR "invalid offset y_len\+4\(FP\); expected y_len\+16\(FP\)"
+ MOVQ y_cap+8(FP), AX // ERROR "invalid offset y_cap\+8\(FP\); expected y_cap\+20\(FP\)"
+ RET
+
+TEXT ·argiface(SB),0,$0-16
+ MOVW x+0(FP), AX // ERROR "invalid MOVW of x\+0\(FP\); interface type is 4-byte value"
+ MOVL x+0(FP), AX
+ MOVQ x+0(FP), AX // ERROR "invalid MOVQ of x\+0\(FP\); interface type is 4-byte value"
+ MOVW x_type+0(FP), AX // ERROR "invalid MOVW of x_type\+0\(FP\); interface type is 4-byte value"
+ MOVL x_type+0(FP), AX
+ MOVQ x_type+0(FP), AX // ERROR "invalid MOVQ of x_type\+0\(FP\); interface type is 4-byte value"
+ MOVQ x_itable+0(FP), AX // ERROR "unknown variable x_itable; offset 0 is x_type\+0\(FP\)"
+ MOVQ x_itable+1(FP), AX // ERROR "unknown variable x_itable; offset 1 is x_type\+0\(FP\)"
+ MOVW x_data+0(FP), AX // ERROR "invalid offset x_data\+0\(FP\); expected x_data\+4\(FP\)"
+ MOVL x_data+0(FP), AX // ERROR "invalid offset x_data\+0\(FP\); expected x_data\+4\(FP\)"
+ MOVQ x_data+0(FP), AX // ERROR "invalid offset x_data\+0\(FP\); expected x_data\+4\(FP\)"
+ MOVW x_data+4(FP), AX // ERROR "invalid MOVW of x_data\+4\(FP\); interface data is 4-byte value"
+ MOVL x_data+4(FP), AX
+ MOVQ x_data+4(FP), AX // ERROR "invalid MOVQ of x_data\+4\(FP\); interface data is 4-byte value"
+ MOVW y+8(FP), AX // ERROR "invalid MOVW of y\+8\(FP\); interface itable is 4-byte value"
+ MOVL y+8(FP), AX
+ MOVQ y+8(FP), AX // ERROR "invalid MOVQ of y\+8\(FP\); interface itable is 4-byte value"
+ MOVW y_itable+8(FP), AX // ERROR "invalid MOVW of y_itable\+8\(FP\); interface itable is 4-byte value"
+ MOVL y_itable+8(FP), AX
+ MOVQ y_itable+8(FP), AX // ERROR "invalid MOVQ of y_itable\+8\(FP\); interface itable is 4-byte value"
+ MOVQ y_type+8(FP), AX // ERROR "unknown variable y_type; offset 8 is y_itable\+8\(FP\)"
+ MOVW y_data+8(FP), AX // ERROR "invalid offset y_data\+8\(FP\); expected y_data\+12\(FP\)"
+ MOVL y_data+8(FP), AX // ERROR "invalid offset y_data\+8\(FP\); expected y_data\+12\(FP\)"
+ MOVQ y_data+8(FP), AX // ERROR "invalid offset y_data\+8\(FP\); expected y_data\+12\(FP\)"
+ MOVW y_data+12(FP), AX // ERROR "invalid MOVW of y_data\+12\(FP\); interface data is 4-byte value"
+ MOVL y_data+12(FP), AX
+ MOVQ y_data+12(FP), AX // ERROR "invalid MOVQ of y_data\+12\(FP\); interface data is 4-byte value"
+ RET
+
+TEXT ·returnint(SB),0,$0-4
+ MOVB AX, ret+0(FP) // ERROR "invalid MOVB of ret\+0\(FP\); int is 4-byte value"
+ MOVW AX, ret+0(FP) // ERROR "invalid MOVW of ret\+0\(FP\); int is 4-byte value"
+ MOVL AX, ret+0(FP)
+ MOVQ AX, ret+0(FP) // ERROR "invalid MOVQ of ret\+0\(FP\); int is 4-byte value"
+ MOVQ AX, ret+1(FP) // ERROR "invalid offset ret\+1\(FP\); expected ret\+0\(FP\)"
+ MOVQ AX, r+0(FP) // ERROR "unknown variable r; offset 0 is ret\+0\(FP\)"
+ RET
+
+TEXT ·returnbyte(SB),0,$0-5
+ MOVL x+0(FP), AX
+ MOVB AX, ret+4(FP)
+ MOVW AX, ret+4(FP) // ERROR "invalid MOVW of ret\+4\(FP\); byte is 1-byte value"
+ MOVL AX, ret+4(FP) // ERROR "invalid MOVL of ret\+4\(FP\); byte is 1-byte value"
+ MOVQ AX, ret+4(FP) // ERROR "invalid MOVQ of ret\+4\(FP\); byte is 1-byte value"
+ MOVB AX, ret+3(FP) // ERROR "invalid offset ret\+3\(FP\); expected ret\+4\(FP\)"
+ RET
+
+TEXT ·returnnamed(SB),0,$0-21
+ MOVB x+0(FP), AX
+ MOVL AX, r1+4(FP)
+ MOVW AX, r2+8(FP)
+ MOVL AX, r3+12(FP)
+ MOVL AX, r3_base+12(FP)
+ MOVL AX, r3_len+16(FP)
+ MOVB AX, r4+20(FP)
+ MOVQ AX, r1+4(FP) // ERROR "invalid MOVQ of r1\+4\(FP\); int is 4-byte value"
+ RET
+
+TEXT ·returnintmissing(SB),0,$0-4
+ RET // ERROR "RET without writing to 4-byte ret\+0\(FP\)"
diff --git a/llgo/third_party/go.tools/cmd/vet/testdata/asm3.s b/llgo/third_party/go.tools/cmd/vet/testdata/asm3.s
new file mode 100644
index 0000000000000000000000000000000000000000..3d69356a0f93f811fdf1e727964e7da5acc3a5e3
--- /dev/null
+++ b/llgo/third_party/go.tools/cmd/vet/testdata/asm3.s
@@ -0,0 +1,178 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build arm
+// +build vet_test
+
+TEXT ·arg1(SB),0,$0-2
+ MOVB x+0(FP), AX
+ MOVB y+1(FP), BX
+ MOVH x+0(FP), AX // ERROR "\[arm\] arg1: invalid MOVH of x\+0\(FP\); int8 is 1-byte value"
+ MOVH y+1(FP), AX // ERROR "invalid MOVH of y\+1\(FP\); uint8 is 1-byte value"
+ MOVW x+0(FP), AX // ERROR "invalid MOVW of x\+0\(FP\); int8 is 1-byte value"
+ MOVW y+1(FP), AX // ERROR "invalid MOVW of y\+1\(FP\); uint8 is 1-byte value"
+ MOVB x+1(FP), AX // ERROR "invalid offset x\+1\(FP\); expected x\+0\(FP\)"
+ MOVB y+2(FP), AX // ERROR "invalid offset y\+2\(FP\); expected y\+1\(FP\)"
+ MOVB 8(R13), AX // ERROR "8\(R13\) should be x\+0\(FP\)"
+ MOVB 9(R13), AX // ERROR "9\(R13\) should be y\+1\(FP\)"
+ MOVB 10(R13), AX // ERROR "use of 10\(R13\) points beyond argument frame"
+ RET
+
+TEXT ·arg2(SB),0,$0-4
+ MOVB x+0(FP), AX // ERROR "arg2: invalid MOVB of x\+0\(FP\); int16 is 2-byte value"
+ MOVB y+2(FP), AX // ERROR "invalid MOVB of y\+2\(FP\); uint16 is 2-byte value"
+ MOVH x+0(FP), AX
+ MOVH y+2(FP), BX
+ MOVW x+0(FP), AX // ERROR "invalid MOVW of x\+0\(FP\); int16 is 2-byte value"
+ MOVW y+2(FP), AX // ERROR "invalid MOVW of y\+2\(FP\); uint16 is 2-byte value"
+ MOVH x+2(FP), AX // ERROR "invalid offset x\+2\(FP\); expected x\+0\(FP\)"
+ MOVH y+0(FP), AX // ERROR "invalid offset y\+0\(FP\); expected y\+2\(FP\)"
+ RET
+
+TEXT ·arg4(SB),0,$0-2 // ERROR "arg4: wrong argument size 2; expected \$\.\.\.-8"
+ MOVB x+0(FP), AX // ERROR "invalid MOVB of x\+0\(FP\); int32 is 4-byte value"
+ MOVB y+4(FP), BX // ERROR "invalid MOVB of y\+4\(FP\); uint32 is 4-byte value"
+ MOVH x+0(FP), AX // ERROR "invalid MOVH of x\+0\(FP\); int32 is 4-byte value"
+ MOVH y+4(FP), AX // ERROR "invalid MOVH of y\+4\(FP\); uint32 is 4-byte value"
+ MOVW x+0(FP), AX
+ MOVW y+4(FP), AX
+ MOVW x+4(FP), AX // ERROR "invalid offset x\+4\(FP\); expected x\+0\(FP\)"
+ MOVW y+2(FP), AX // ERROR "invalid offset y\+2\(FP\); expected y\+4\(FP\)"
+ RET
+
+TEXT ·arg8(SB),7,$0-2 // ERROR "wrong argument size 2; expected \$\.\.\.-16"
+ MOVB x+0(FP), AX // ERROR "invalid MOVB of x\+0\(FP\); int64 is 8-byte value"
+ MOVB y+8(FP), BX // ERROR "invalid MOVB of y\+8\(FP\); uint64 is 8-byte value"
+ MOVH x+0(FP), AX // ERROR "invalid MOVH of x\+0\(FP\); int64 is 8-byte value"
+ MOVH y+8(FP), AX // ERROR "invalid MOVH of y\+8\(FP\); uint64 is 8-byte value"
+ MOVW x+0(FP), AX // ERROR "invalid MOVW of x\+0\(FP\); int64 is 8-byte value containing x_lo\+0\(FP\) and x_hi\+4\(FP\)"
+ MOVW x_lo+0(FP), AX
+ MOVW x_hi+4(FP), AX
+ MOVW y+8(FP), AX // ERROR "invalid MOVW of y\+8\(FP\); uint64 is 8-byte value containing y_lo\+8\(FP\) and y_hi\+12\(FP\)"
+ MOVW y_lo+8(FP), AX
+ MOVW y_hi+12(FP), AX
+ MOVQ x+0(FP), AX
+ MOVQ y+8(FP), AX
+ MOVQ x+8(FP), AX // ERROR "invalid offset x\+8\(FP\); expected x\+0\(FP\)"
+ MOVQ y+2(FP), AX // ERROR "invalid offset y\+2\(FP\); expected y\+8\(FP\)"
+ RET
+
+TEXT ·argint(SB),0,$0-2 // ERROR "wrong argument size 2; expected \$\.\.\.-8"
+ MOVB x+0(FP), AX // ERROR "invalid MOVB of x\+0\(FP\); int is 4-byte value"
+ MOVB y+4(FP), BX // ERROR "invalid MOVB of y\+4\(FP\); uint is 4-byte value"
+ MOVH x+0(FP), AX // ERROR "invalid MOVH of x\+0\(FP\); int is 4-byte value"
+ MOVH y+4(FP), AX // ERROR "invalid MOVH of y\+4\(FP\); uint is 4-byte value"
+ MOVW x+0(FP), AX
+ MOVW y+4(FP), AX
+ MOVQ x+4(FP), AX // ERROR "invalid offset x\+4\(FP\); expected x\+0\(FP\)"
+ MOVQ y+2(FP), AX // ERROR "invalid offset y\+2\(FP\); expected y\+4\(FP\)"
+ RET
+
+TEXT ·argptr(SB),7,$0-2 // ERROR "wrong argument size 2; expected \$\.\.\.-20"
+ MOVB x+0(FP), AX // ERROR "invalid MOVB of x\+0\(FP\); \*byte is 4-byte value"
+ MOVB y+4(FP), BX // ERROR "invalid MOVB of y\+4\(FP\); \*byte is 4-byte value"
+ MOVH x+0(FP), AX // ERROR "invalid MOVH of x\+0\(FP\); \*byte is 4-byte value"
+ MOVH y+4(FP), AX // ERROR "invalid MOVH of y\+4\(FP\); \*byte is 4-byte value"
+ MOVW x+0(FP), AX
+ MOVW y+4(FP), AX
+ MOVQ x+4(FP), AX // ERROR "invalid offset x\+4\(FP\); expected x\+0\(FP\)"
+ MOVQ y+2(FP), AX // ERROR "invalid offset y\+2\(FP\); expected y\+4\(FP\)"
+ MOVH c+8(FP), AX // ERROR "invalid MOVH of c\+8\(FP\); chan int is 4-byte value"
+ MOVH m+12(FP), AX // ERROR "invalid MOVH of m\+12\(FP\); map\[int\]int is 4-byte value"
+ MOVH f+16(FP), AX // ERROR "invalid MOVH of f\+16\(FP\); func\(\) is 4-byte value"
+ RET
+
+TEXT ·argstring(SB),0,$16 // ERROR "wrong argument size 0; expected \$\.\.\.-16"
+ MOVH x+0(FP), AX // ERROR "invalid MOVH of x\+0\(FP\); string base is 4-byte value"
+ MOVW x+0(FP), AX
+ MOVH x_base+0(FP), AX // ERROR "invalid MOVH of x_base\+0\(FP\); string base is 4-byte value"
+ MOVW x_base+0(FP), AX
+ MOVH x_len+0(FP), AX // ERROR "invalid offset x_len\+0\(FP\); expected x_len\+4\(FP\)"
+ MOVW x_len+0(FP), AX // ERROR "invalid offset x_len\+0\(FP\); expected x_len\+4\(FP\)"
+ MOVQ x_len+0(FP), AX // ERROR "invalid offset x_len\+0\(FP\); expected x_len\+4\(FP\)"
+ MOVH x_len+4(FP), AX // ERROR "invalid MOVH of x_len\+4\(FP\); string len is 4-byte value"
+ MOVW x_len+4(FP), AX
+ MOVQ y+0(FP), AX // ERROR "invalid offset y\+0\(FP\); expected y\+8\(FP\)"
+ MOVQ y_len+4(FP), AX // ERROR "invalid offset y_len\+4\(FP\); expected y_len\+12\(FP\)"
+ RET
+
+TEXT ·argslice(SB),0,$24 // ERROR "wrong argument size 0; expected \$\.\.\.-24"
+ MOVH x+0(FP), AX // ERROR "invalid MOVH of x\+0\(FP\); slice base is 4-byte value"
+ MOVW x+0(FP), AX
+ MOVH x_base+0(FP), AX // ERROR "invalid MOVH of x_base\+0\(FP\); slice base is 4-byte value"
+ MOVW x_base+0(FP), AX
+ MOVH x_len+0(FP), AX // ERROR "invalid offset x_len\+0\(FP\); expected x_len\+4\(FP\)"
+ MOVW x_len+0(FP), AX // ERROR "invalid offset x_len\+0\(FP\); expected x_len\+4\(FP\)"
+ MOVQ x_len+0(FP), AX // ERROR "invalid offset x_len\+0\(FP\); expected x_len\+4\(FP\)"
+ MOVH x_len+4(FP), AX // ERROR "invalid MOVH of x_len\+4\(FP\); slice len is 4-byte value"
+ MOVW x_len+4(FP), AX
+ MOVH x_cap+0(FP), AX // ERROR "invalid offset x_cap\+0\(FP\); expected x_cap\+8\(FP\)"
+ MOVW x_cap+0(FP), AX // ERROR "invalid offset x_cap\+0\(FP\); expected x_cap\+8\(FP\)"
+ MOVQ x_cap+0(FP), AX // ERROR "invalid offset x_cap\+0\(FP\); expected x_cap\+8\(FP\)"
+ MOVH x_cap+8(FP), AX // ERROR "invalid MOVH of x_cap\+8\(FP\); slice cap is 4-byte value"
+ MOVW x_cap+8(FP), AX
+ MOVQ y+0(FP), AX // ERROR "invalid offset y\+0\(FP\); expected y\+12\(FP\)"
+ MOVQ y_len+4(FP), AX // ERROR "invalid offset y_len\+4\(FP\); expected y_len\+16\(FP\)"
+ MOVQ y_cap+8(FP), AX // ERROR "invalid offset y_cap\+8\(FP\); expected y_cap\+20\(FP\)"
+ RET
+
+TEXT ·argiface(SB),0,$0-16
+ MOVH x+0(FP), AX // ERROR "invalid MOVH of x\+0\(FP\); interface type is 4-byte value"
+ MOVW x+0(FP), AX
+ MOVH x_type+0(FP), AX // ERROR "invalid MOVH of x_type\+0\(FP\); interface type is 4-byte value"
+ MOVW x_type+0(FP), AX
+ MOVQ x_itable+0(FP), AX // ERROR "unknown variable x_itable; offset 0 is x_type\+0\(FP\)"
+ MOVQ x_itable+1(FP), AX // ERROR "unknown variable x_itable; offset 1 is x_type\+0\(FP\)"
+ MOVH x_data+0(FP), AX // ERROR "invalid offset x_data\+0\(FP\); expected x_data\+4\(FP\)"
+ MOVW x_data+0(FP), AX // ERROR "invalid offset x_data\+0\(FP\); expected x_data\+4\(FP\)"
+ MOVQ x_data+0(FP), AX // ERROR "invalid offset x_data\+0\(FP\); expected x_data\+4\(FP\)"
+ MOVH x_data+4(FP), AX // ERROR "invalid MOVH of x_data\+4\(FP\); interface data is 4-byte value"
+ MOVW x_data+4(FP), AX
+ MOVH y+8(FP), AX // ERROR "invalid MOVH of y\+8\(FP\); interface itable is 4-byte value"
+ MOVW y+8(FP), AX
+ MOVH y_itable+8(FP), AX // ERROR "invalid MOVH of y_itable\+8\(FP\); interface itable is 4-byte value"
+ MOVW y_itable+8(FP), AX
+ MOVQ y_type+8(FP), AX // ERROR "unknown variable y_type; offset 8 is y_itable\+8\(FP\)"
+ MOVH y_data+8(FP), AX // ERROR "invalid offset y_data\+8\(FP\); expected y_data\+12\(FP\)"
+ MOVW y_data+8(FP), AX // ERROR "invalid offset y_data\+8\(FP\); expected y_data\+12\(FP\)"
+ MOVQ y_data+8(FP), AX // ERROR "invalid offset y_data\+8\(FP\); expected y_data\+12\(FP\)"
+ MOVH y_data+12(FP), AX // ERROR "invalid MOVH of y_data\+12\(FP\); interface data is 4-byte value"
+ MOVW y_data+12(FP), AX
+ RET
+
+TEXT ·returnint(SB),0,$0-4
+ MOVB AX, ret+0(FP) // ERROR "invalid MOVB of ret\+0\(FP\); int is 4-byte value"
+ MOVH AX, ret+0(FP) // ERROR "invalid MOVH of ret\+0\(FP\); int is 4-byte value"
+ MOVW AX, ret+0(FP)
+ MOVQ AX, ret+1(FP) // ERROR "invalid offset ret\+1\(FP\); expected ret\+0\(FP\)"
+ MOVQ AX, r+0(FP) // ERROR "unknown variable r; offset 0 is ret\+0\(FP\)"
+ RET
+
+TEXT ·returnbyte(SB),0,$0-5
+ MOVW x+0(FP), AX
+ MOVB AX, ret+4(FP)
+ MOVH AX, ret+4(FP) // ERROR "invalid MOVH of ret\+4\(FP\); byte is 1-byte value"
+ MOVW AX, ret+4(FP) // ERROR "invalid MOVW of ret\+4\(FP\); byte is 1-byte value"
+ MOVB AX, ret+3(FP) // ERROR "invalid offset ret\+3\(FP\); expected ret\+4\(FP\)"
+ RET
+
+TEXT ·returnnamed(SB),0,$0-21
+ MOVB x+0(FP), AX
+ MOVW AX, r1+4(FP)
+ MOVH AX, r2+8(FP)
+ MOVW AX, r3+12(FP)
+ MOVW AX, r3_base+12(FP)
+ MOVW AX, r3_len+16(FP)
+ MOVB AX, r4+20(FP)
+ MOVB AX, r1+4(FP) // ERROR "invalid MOVB of r1\+4\(FP\); int is 4-byte value"
+ RET
+
+TEXT ·returnintmissing(SB),0,$0-4
+ RET // ERROR "RET without writing to 4-byte ret\+0\(FP\)"
+
+TEXT ·leaf(SB),0,$-4-12
+ MOVW x+0(FP), AX
+ MOVW y+4(FP), AX
+ MOVW AX, ret+8(FP)
+ RET
diff --git a/llgo/third_party/go.tools/cmd/vet/testdata/asm4.s b/llgo/third_party/go.tools/cmd/vet/testdata/asm4.s
new file mode 100644
index 0000000000000000000000000000000000000000..044b050b6b99e1426f1cc653bb4c25428dd1b52d
--- /dev/null
+++ b/llgo/third_party/go.tools/cmd/vet/testdata/asm4.s
@@ -0,0 +1,26 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build amd64
+// +build vet_test
+
+// Test cases for symbolic NOSPLIT etc. on TEXT symbols.
+
+TEXT ·noprof(SB),NOPROF,$0-8
+ RET
+
+TEXT ·dupok(SB),DUPOK,$0-8
+ RET
+
+TEXT ·nosplit(SB),NOSPLIT,$0
+ RET
+
+TEXT ·rodata(SB),RODATA,$0-8
+ RET
+
+TEXT ·noptr(SB),NOPTR|NOSPLIT,$0
+ RET
+
+TEXT ·wrapper(SB),WRAPPER,$0-8
+ RET
diff --git a/llgo/third_party/go.tools/cmd/vet/testdata/assign.go b/llgo/third_party/go.tools/cmd/vet/testdata/assign.go
new file mode 100644
index 0000000000000000000000000000000000000000..32ba8683c148522a7f2cfa618e6a661c470e3a0f
--- /dev/null
+++ b/llgo/third_party/go.tools/cmd/vet/testdata/assign.go
@@ -0,0 +1,18 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains tests for the useless-assignment checker.
+
+package testdata
+
+type ST struct {
+ x int
+}
+
+func (s *ST) SetX(x int) {
+ // Accidental self-assignment; it should be "s.x = x"
+ x = x // ERROR "self-assignment of x to x"
+ // Another mistake
+ s.x = s.x // ERROR "self-assignment of s.x to s.x"
+}
diff --git a/llgo/third_party/go.tools/cmd/vet/testdata/atomic.go b/llgo/third_party/go.tools/cmd/vet/testdata/atomic.go
new file mode 100644
index 0000000000000000000000000000000000000000..1ba261d9412a2b47684e113f4562b447c1cf80e2
--- /dev/null
+++ b/llgo/third_party/go.tools/cmd/vet/testdata/atomic.go
@@ -0,0 +1,43 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains tests for the atomic checker.
+
+package testdata
+
+import (
+ "sync/atomic"
+)
+
+type Counter uint64
+
+func AtomicTests() {
+ x := uint64(1)
+ x = atomic.AddUint64(&x, 1) // ERROR "direct assignment to atomic value"
+ _, x = 10, atomic.AddUint64(&x, 1) // ERROR "direct assignment to atomic value"
+ x, _ = atomic.AddUint64(&x, 1), 10 // ERROR "direct assignment to atomic value"
+
+ y := &x
+ *y = atomic.AddUint64(y, 1) // ERROR "direct assignment to atomic value"
+
+ var su struct{ Counter uint64 }
+ su.Counter = atomic.AddUint64(&su.Counter, 1) // ERROR "direct assignment to atomic value"
+ z1 := atomic.AddUint64(&su.Counter, 1)
+ _ = z1 // Avoid err "z declared and not used"
+
+ var sp struct{ Counter *uint64 }
+ *sp.Counter = atomic.AddUint64(sp.Counter, 1) // ERROR "direct assignment to atomic value"
+ z2 := atomic.AddUint64(sp.Counter, 1)
+ _ = z2 // Avoid err "z declared and not used"
+
+ au := []uint64{10, 20}
+ au[0] = atomic.AddUint64(&au[0], 1) // ERROR "direct assignment to atomic value"
+ au[1] = atomic.AddUint64(&au[0], 1)
+
+ ap := []*uint64{&au[0], &au[1]}
+ *ap[0] = atomic.AddUint64(ap[0], 1) // ERROR "direct assignment to atomic value"
+ *ap[1] = atomic.AddUint64(ap[0], 1)
+
+ x = atomic.AddUint64() // Used to make vet crash; now silently ignored.
+}
diff --git a/llgo/third_party/go.tools/cmd/vet/testdata/bool.go b/llgo/third_party/go.tools/cmd/vet/testdata/bool.go
new file mode 100644
index 0000000000000000000000000000000000000000..af6cc011dd60c4d95fcdae7400945b4c943d1e67
--- /dev/null
+++ b/llgo/third_party/go.tools/cmd/vet/testdata/bool.go
@@ -0,0 +1,113 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains tests for the bool checker.
+
+package testdata
+
+import "io"
+
+func RatherStupidConditions() {
+ var f, g func() int
+ if f() == 0 || f() == 0 { // OK f might have side effects
+ }
+ if v, w := f(), g(); v == w || v == w { // ERROR "redundant or: v == w || v == w"
+ }
+ _ = f == nil || f == nil // ERROR "redundant or: f == nil || f == nil"
+
+ _ = i == byte(1) || i == byte(1) // TODO conversions are treated as if they may have side effects
+
+ var c chan int
+ _ = 0 == <-c || 0 == <-c // OK subsequent receives may yield different values
+ for i, j := <-c, <-c; i == j || i == j; i, j = <-c, <-c { // ERROR "redundant or: i == j || i == j"
+ }
+
+ var i, j, k int
+ _ = i+1 == 1 || i+1 == 1 // ERROR "redundant or: i\+1 == 1 || i\+1 == 1"
+ _ = i == 1 || j+1 == i || i == 1 // ERROR "redundant or: i == 1 || i == 1"
+
+ _ = i == 1 || i == 1 || f() == 1 // ERROR "redundant or: i == 1 || i == 1"
+ _ = i == 1 || f() == 1 || i == 1 // OK f may alter i as a side effect
+ _ = f() == 1 || i == 1 || i == 1 // ERROR "redundant or: i == 1 || i == 1"
+
+ // Test partition edge cases
+ _ = f() == 1 || i == 1 || i == 1 || j == 1 // ERROR "redundant or: i == 1 || i == 1"
+ _ = f() == 1 || j == 1 || i == 1 || i == 1 // ERROR "redundant or: i == 1 || i == 1"
+ _ = i == 1 || f() == 1 || i == 1 || i == 1 // ERROR "redundant or: i == 1 || i == 1"
+ _ = i == 1 || i == 1 || f() == 1 || i == 1 // ERROR "redundant or: i == 1 || i == 1"
+ _ = i == 1 || i == 1 || j == 1 || f() == 1 // ERROR "redundant or: i == 1 || i == 1"
+ _ = j == 1 || i == 1 || i == 1 || f() == 1 // ERROR "redundant or: i == 1 || i == 1"
+ _ = i == 1 || f() == 1 || f() == 1 || i == 1
+
+ _ = i == 1 || (i == 1 || i == 2) // ERROR "redundant or: i == 1 || i == 1"
+ _ = i == 1 || (f() == 1 || i == 1) // OK f may alter i as a side effect
+ _ = i == 1 || (i == 1 || f() == 1) // ERROR "redundant or: i == 1 || i == 1"
+ _ = i == 1 || (i == 2 || (i == 1 || i == 3)) // ERROR "redundant or: i == 1 || i == 1"
+
+ var a, b bool
+ _ = i == 1 || (a || (i == 1 || b)) // ERROR "redundant or: i == 1 || i == 1"
+
+ // Check that all redundant ors are flagged
+ _ = j == 0 ||
+ i == 1 ||
+ f() == 1 ||
+ j == 0 || // ERROR "redundant or: j == 0 || j == 0"
+ i == 1 || // ERROR "redundant or: i == 1 || i == 1"
+ i == 1 || // ERROR "redundant or: i == 1 || i == 1"
+ i == 1 ||
+ j == 0 ||
+ k == 0
+
+ _ = i == 1*2*3 || i == 1*2*3 // ERROR "redundant or: i == 1\*2\*3 || i == 1\*2\*3"
+
+ // These test that redundant, suspect expressions do not trigger multiple errors.
+ _ = i != 0 || i != 0 // ERROR "redundant or: i != 0 || i != 0"
+ _ = i == 0 && i == 0 // ERROR "redundant and: i == 0 && i == 0"
+
+ // and is dual to or; check the basics and
+ // let the or tests pull the rest of the weight.
+ _ = 0 != <-c && 0 != <-c // OK subsequent receives may yield different values
+ _ = f() != 0 && f() != 0 // OK f might have side effects
+ _ = f != nil && f != nil // ERROR "redundant and: f != nil && f != nil"
+ _ = i != 1 && i != 1 && f() != 1 // ERROR "redundant and: i != 1 && i != 1"
+ _ = i != 1 && f() != 1 && i != 1 // OK f may alter i as a side effect
+ _ = f() != 1 && i != 1 && i != 1 // ERROR "redundant and: i != 1 && i != 1"
+}
+
+func RoyallySuspectConditions() {
+ var i, j int
+
+ _ = i == 0 || i == 1 // OK
+ _ = i != 0 || i != 1 // ERROR "suspect or: i != 0 || i != 1"
+ _ = i != 0 || 1 != i // ERROR "suspect or: i != 0 || 1 != i"
+ _ = 0 != i || 1 != i // ERROR "suspect or: 0 != i || 1 != i"
+ _ = 0 != i || i != 1 // ERROR "suspect or: 0 != i || i != 1"
+
+ _ = (0 != i) || i != 1 // ERROR "suspect or: 0 != i || i != 1"
+
+ _ = i+3 != 7 || j+5 == 0 || i+3 != 9 // ERROR "suspect or: i\+3 != 7 || i\+3 != 9"
+
+ _ = i != 0 || j == 0 || i != 1 // ERROR "suspect or: i != 0 || i != 1"
+
+ _ = i != 0 || i != 1<<4 // ERROR "suspect or: i != 0 || i != 1<<4"
+
+ _ = i != 0 || j != 0
+ _ = 0 != i || 0 != j
+
+ var s string
+ _ = s != "one" || s != "the other" // ERROR "suspect or: s != .one. || s != .the other."
+
+ _ = "et" != "alii" || "et" != "cetera" // ERROR "suspect or: .et. != .alii. || .et. != .cetera."
+ _ = "me gustas" != "tu" || "le gustas" != "tu" // OK we could catch this case, but it's not worth the code
+
+ var err error
+ _ = err != nil || err != io.EOF // TODO catch this case?
+
+ // Sanity check and.
+ _ = i != 0 && i != 1 // OK
+ _ = i == 0 && i == 1 // ERROR "suspect and: i == 0 && i == 1"
+ _ = i == 0 && 1 == i // ERROR "suspect and: i == 0 && 1 == i"
+ _ = 0 == i && 1 == i // ERROR "suspect and: 0 == i && 1 == i"
+ _ = 0 == i && i == 1 // ERROR "suspect and: 0 == i && i == 1"
+}
diff --git a/llgo/third_party/go.tools/cmd/vet/testdata/buildtag.go b/llgo/third_party/go.tools/cmd/vet/testdata/buildtag.go
new file mode 100644
index 0000000000000000000000000000000000000000..eb36fd32592eee89609a0c92a3c8f5eb9e9dfc28
--- /dev/null
+++ b/llgo/third_party/go.tools/cmd/vet/testdata/buildtag.go
@@ -0,0 +1,14 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains tests for the buildtag checker.
+
+// +builder // ERROR "possible malformed \+build comment"
+// +build !ignore
+
+package testdata
+
+// +build toolate // ERROR "build comment must appear before package clause and be followed by a blank line"
+
+var _ = 3
diff --git a/llgo/third_party/go.tools/cmd/vet/testdata/buildtag_bad.go b/llgo/third_party/go.tools/cmd/vet/testdata/buildtag_bad.go
new file mode 100644
index 0000000000000000000000000000000000000000..fbe10cf748f58b1a1291117d4d684b0dc941c782
--- /dev/null
+++ b/llgo/third_party/go.tools/cmd/vet/testdata/buildtag_bad.go
@@ -0,0 +1,15 @@
+// This file contains misplaced or malformed build constraints.
+// The Go tool will skip it, because the constraints are invalid.
+// It serves only to test the tag checker during make test.
+
+// Mention +build // ERROR "possible malformed \+build comment"
+
+// +build !!bang // ERROR "invalid double negative in build constraint"
+// +build @#$ // ERROR "invalid non-alphanumeric build constraint"
+
+// +build toolate // ERROR "build comment must appear before package clause and be followed by a blank line"
+package bad
+
+// This is package 'bad' rather than 'main' so the erroneous build
+// tag doesn't end up looking like a package doc for the vet command
+// when examined by godoc.
diff --git a/llgo/third_party/go.tools/cmd/vet/testdata/composite.go b/llgo/third_party/go.tools/cmd/vet/testdata/composite.go
new file mode 100644
index 0000000000000000000000000000000000000000..69e7d7ccb0a84e2a8a02e1a8aebaaeb752881e4d
--- /dev/null
+++ b/llgo/third_party/go.tools/cmd/vet/testdata/composite.go
@@ -0,0 +1,63 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains tests for the untagged struct literal checker.
+
+// This file contains the test for untagged struct literals.
+
+package testdata
+
+import (
+ "flag"
+ "go/scanner"
+)
+
+var Okay1 = []string{
+ "Name",
+ "Usage",
+ "DefValue",
+}
+
+var Okay2 = map[string]bool{
+ "Name": true,
+ "Usage": true,
+ "DefValue": true,
+}
+
+var Okay3 = struct {
+ X string
+ Y string
+ Z string
+}{
+ "Name",
+ "Usage",
+ "DefValue",
+}
+
+type MyStruct struct {
+ X string
+ Y string
+ Z string
+}
+
+var Okay4 = MyStruct{
+ "Name",
+ "Usage",
+ "DefValue",
+}
+
+// Testing is awkward because we need to reference things from a separate package
+// to trigger the warnings.
+
+var BadStructLiteralUsedInTests = flag.Flag{ // ERROR "unkeyed fields"
+ "Name",
+ "Usage",
+ nil, // Value
+ "DefValue",
+}
+
+// Used to test the check for slices and arrays: If that test is disabled and
+// vet is run with --compositewhitelist=false, this line triggers an error.
+// Clumsy but sufficient.
+var scannerErrorListTest = scanner.ErrorList{nil, nil}
diff --git a/llgo/third_party/go.tools/cmd/vet/testdata/copylock_func.go b/llgo/third_party/go.tools/cmd/vet/testdata/copylock_func.go
new file mode 100644
index 0000000000000000000000000000000000000000..108c04420954db802edb225505811b6b29eb6375
--- /dev/null
+++ b/llgo/third_party/go.tools/cmd/vet/testdata/copylock_func.go
@@ -0,0 +1,90 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains tests for the copylock checker's
+// function declaration analysis.
+
+package testdata
+
+import "sync"
+
+func OkFunc(*sync.Mutex) {}
+func BadFunc(sync.Mutex) {} // ERROR "BadFunc passes Lock by value: sync.Mutex"
+func OkRet() *sync.Mutex {}
+func BadRet() sync.Mutex {} // ERROR "BadRet returns Lock by value: sync.Mutex"
+
+type EmbeddedRWMutex struct {
+ sync.RWMutex
+}
+
+func (*EmbeddedRWMutex) OkMeth() {}
+func (EmbeddedRWMutex) BadMeth() {} // ERROR "BadMeth passes Lock by value: testdata.EmbeddedRWMutex"
+func OkFunc(e *EmbeddedRWMutex) {}
+func BadFunc(EmbeddedRWMutex) {} // ERROR "BadFunc passes Lock by value: testdata.EmbeddedRWMutex"
+func OkRet() *EmbeddedRWMutex {}
+func BadRet() EmbeddedRWMutex {} // ERROR "BadRet returns Lock by value: testdata.EmbeddedRWMutex"
+
+type FieldMutex struct {
+ s sync.Mutex
+}
+
+func (*FieldMutex) OkMeth() {}
+func (FieldMutex) BadMeth() {} // ERROR "BadMeth passes Lock by value: testdata.FieldMutex contains sync.Mutex"
+func OkFunc(*FieldMutex) {}
+func BadFunc(FieldMutex, int) {} // ERROR "BadFunc passes Lock by value: testdata.FieldMutex contains sync.Mutex"
+
+type L0 struct {
+ L1
+}
+
+type L1 struct {
+ l L2
+}
+
+type L2 struct {
+ sync.Mutex
+}
+
+func (*L0) Ok() {}
+func (L0) Bad() {} // ERROR "Bad passes Lock by value: testdata.L0 contains testdata.L1 contains testdata.L2"
+
+type EmbeddedMutexPointer struct {
+ s *sync.Mutex // safe to copy this pointer
+}
+
+func (*EmbeddedMutexPointer) Ok() {}
+func (EmbeddedMutexPointer) AlsoOk() {}
+func StillOk(EmbeddedMutexPointer) {}
+func LookinGood() EmbeddedMutexPointer {}
+
+type EmbeddedLocker struct {
+ sync.Locker // safe to copy interface values
+}
+
+func (*EmbeddedLocker) Ok() {}
+func (EmbeddedLocker) AlsoOk() {}
+
+type CustomLock struct{}
+
+func (*CustomLock) Lock() {}
+func (*CustomLock) Unlock() {}
+
+func Ok(*CustomLock) {}
+func Bad(CustomLock) {} // ERROR "Bad passes Lock by value: testdata.CustomLock"
+
+// TODO: Unfortunate cases
+
+// Non-ideal error message:
+// Since we're looking for Lock methods, sync.Once's underlying
+// sync.Mutex gets called out, but without any reference to the sync.Once.
+type LocalOnce sync.Once
+
+func (LocalOnce) Bad() {} // ERROR "Bad passes Lock by value: testdata.LocalOnce contains sync.Mutex"
+
+// False negative:
+// LocalMutex doesn't have a Lock method.
+// Nevertheless, it is probably a bad idea to pass it by value.
+type LocalMutex sync.Mutex
+
+func (LocalMutex) Bad() {} // WANTED: An error here :(
diff --git a/llgo/third_party/go.tools/cmd/vet/testdata/copylock_range.go b/llgo/third_party/go.tools/cmd/vet/testdata/copylock_range.go
new file mode 100644
index 0000000000000000000000000000000000000000..f95b0252b6f4da6f6d57e408915cec381df53944
--- /dev/null
+++ b/llgo/third_party/go.tools/cmd/vet/testdata/copylock_range.go
@@ -0,0 +1,67 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains tests for the copylock checker's
+// range statement analysis.
+
+package testdata
+
+import "sync"
+
+func rangeMutex() {
+ var mu sync.Mutex
+ var i int
+
+ var s []sync.Mutex
+ for range s {
+ }
+ for i = range s {
+ }
+ for i := range s {
+ }
+ for i, _ = range s {
+ }
+ for i, _ := range s {
+ }
+ for _, mu = range s { // ERROR "range var mu copies Lock: sync.Mutex"
+ }
+ for _, m := range s { // ERROR "range var m copies Lock: sync.Mutex"
+ }
+ for i, mu = range s { // ERROR "range var mu copies Lock: sync.Mutex"
+ }
+ for i, m := range s { // ERROR "range var m copies Lock: sync.Mutex"
+ }
+
+ var a [3]sync.Mutex
+ for _, m := range a { // ERROR "range var m copies Lock: sync.Mutex"
+ }
+
+ var m map[sync.Mutex]sync.Mutex
+ for k := range m { // ERROR "range var k copies Lock: sync.Mutex"
+ }
+ for mu, _ = range m { // ERROR "range var mu copies Lock: sync.Mutex"
+ }
+ for k, _ := range m { // ERROR "range var k copies Lock: sync.Mutex"
+ }
+ for _, mu = range m { // ERROR "range var mu copies Lock: sync.Mutex"
+ }
+ for _, v := range m { // ERROR "range var v copies Lock: sync.Mutex"
+ }
+
+ var c chan sync.Mutex
+ for range c {
+ }
+ for mu = range c { // ERROR "range var mu copies Lock: sync.Mutex"
+ }
+ for v := range c { // ERROR "range var v copies Lock: sync.Mutex"
+ }
+
+ // Test non-idents in range variables
+ var t struct {
+ i int
+ mu sync.Mutex
+ }
+ for t.i, t.mu = range s { // ERROR "range var t.mu copies Lock: sync.Mutex"
+ }
+}
diff --git a/llgo/third_party/go.tools/cmd/vet/testdata/deadcode.go b/llgo/third_party/go.tools/cmd/vet/testdata/deadcode.go
new file mode 100644
index 0000000000000000000000000000000000000000..5370bc32f6577e37222d0ee10ac7ac3d4a0b657d
--- /dev/null
+++ b/llgo/third_party/go.tools/cmd/vet/testdata/deadcode.go
@@ -0,0 +1,2125 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+// This file contains tests for the dead code checker.
+
+package testdata
+
+type T int
+
+var x interface{}
+var c chan int
+
+func external() int // ok
+
+func _() int {
+}
+
+func _() int {
+ print(1)
+}
+
+func _() int {
+ print(1)
+ return 2
+ println() // ERROR "unreachable code"
+}
+
+func _() int {
+L:
+ print(1)
+ goto L
+ println() // ERROR "unreachable code"
+}
+
+func _() int {
+ print(1)
+ panic(2)
+ println() // ERROR "unreachable code"
+}
+
+// but only builtin panic
+func _() int {
+ var panic = func(int) {}
+ print(1)
+ panic(2)
+ println() // ok
+}
+
+func _() int {
+ {
+ print(1)
+ return 2
+ println() // ERROR "unreachable code"
+ }
+ println() // ok
+}
+
+func _() int {
+ {
+ print(1)
+ return 2
+ }
+ println() // ERROR "unreachable code"
+}
+
+func _() int {
+L:
+ {
+ print(1)
+ goto L
+ println() // ERROR "unreachable code"
+ }
+ println() // ok
+}
+
+func _() int {
+L:
+ {
+ print(1)
+ goto L
+ }
+ println() // ERROR "unreachable code"
+}
+
+func _() int {
+ print(1)
+ {
+ panic(2)
+ }
+}
+
+func _() int {
+ print(1)
+ {
+ panic(2)
+ println() // ERROR "unreachable code"
+ }
+}
+
+func _() int {
+ print(1)
+ {
+ panic(2)
+ }
+ println() // ERROR "unreachable code"
+}
+
+func _() int {
+ print(1)
+ return 2
+ { // ERROR "unreachable code"
+ }
+}
+
+func _() int {
+L:
+ print(1)
+ goto L
+ { // ERROR "unreachable code"
+ }
+}
+
+func _() int {
+ print(1)
+ panic(2)
+ { // ERROR "unreachable code"
+ }
+}
+
+func _() int {
+ {
+ print(1)
+ return 2
+ { // ERROR "unreachable code"
+ }
+ }
+}
+
+func _() int {
+L:
+ {
+ print(1)
+ goto L
+ { // ERROR "unreachable code"
+ }
+ }
+}
+
+func _() int {
+ print(1)
+ {
+ panic(2)
+ { // ERROR "unreachable code"
+ }
+ }
+}
+
+func _() int {
+ {
+ print(1)
+ return 2
+ }
+ { // ERROR "unreachable code"
+ }
+}
+
+func _() int {
+L:
+ {
+ print(1)
+ goto L
+ }
+ { // ERROR "unreachable code"
+ }
+}
+
+func _() int {
+ print(1)
+ {
+ panic(2)
+ }
+ { // ERROR "unreachable code"
+ }
+}
+
+func _() int {
+ print(1)
+ if x == nil {
+ panic(2)
+ } else {
+ panic(3)
+ }
+ println() // ERROR "unreachable code"
+}
+
+func _() int {
+L:
+ print(1)
+ if x == nil {
+ panic(2)
+ } else {
+ goto L
+ }
+ println() // ERROR "unreachable code"
+}
+
+func _() int {
+L:
+ print(1)
+ if x == nil {
+ panic(2)
+ } else if x == 1 {
+ return 0
+ } else if x != 2 {
+ panic(3)
+ } else {
+ goto L
+ }
+ println() // ERROR "unreachable code"
+}
+
+// if-else chain missing final else is not okay, even if the
+// conditions cover every possible case.
+
+func _() int {
+ print(1)
+ if x == nil {
+ panic(2)
+ } else if x != nil {
+ panic(3)
+ }
+ println() // ok
+}
+
+func _() int {
+ print(1)
+ if x == nil {
+ panic(2)
+ }
+ println() // ok
+}
+
+func _() int {
+L:
+ print(1)
+ if x == nil {
+ panic(2)
+ } else if x == 1 {
+ return 0
+ } else if x != 1 {
+ panic(3)
+ }
+ println() // ok
+}
+
+func _() int {
+ print(1)
+ for {
+ }
+ println() // ERROR "unreachable code"
+}
+
+func _() int {
+ for {
+ for {
+ break
+ }
+ }
+ println() // ERROR "unreachable code"
+}
+
+func _() int {
+ for {
+ for {
+ break
+ println() // ERROR "unreachable code"
+ }
+ }
+}
+
+func _() int {
+ for {
+ for {
+ continue
+ println() // ERROR "unreachable code"
+ }
+ }
+}
+
+func _() int {
+ for {
+ L:
+ for {
+ break L
+ }
+ }
+ println() // ERROR "unreachable code"
+}
+
+func _() int {
+ print(1)
+ for {
+ break
+ }
+ println() // ok
+}
+
+func _() int {
+ for {
+ for {
+ }
+ break // ERROR "unreachable code"
+ }
+ println() // ok
+}
+
+func _() int {
+L:
+ for {
+ for {
+ break L
+ }
+ }
+ println() // ok
+}
+
+func _() int {
+ print(1)
+ for x == nil {
+ }
+ println() // ok
+}
+
+func _() int {
+ for x == nil {
+ for {
+ break
+ }
+ }
+ println() // ok
+}
+
+func _() int {
+ for x == nil {
+ L:
+ for {
+ break L
+ }
+ }
+ println() // ok
+}
+
+func _() int {
+ print(1)
+ for true {
+ }
+ println() // ok
+}
+
+func _() int {
+ for true {
+ for {
+ break
+ }
+ }
+ println() // ok
+}
+
+func _() int {
+ for true {
+ L:
+ for {
+ break L
+ }
+ }
+ println() // ok
+}
+
+func _() int {
+ print(1)
+ select {}
+ println() // ERROR "unreachable code"
+}
+
+func _() int {
+ print(1)
+ select {
+ case <-c:
+ print(2)
+ panic("abc")
+ println() // ERROR "unreachable code"
+ }
+}
+
+func _() int {
+ print(1)
+ select {
+ case <-c:
+ print(2)
+ panic("abc")
+ }
+ println() // ERROR "unreachable code"
+}
+
+func _() int {
+ print(1)
+ select {
+ case <-c:
+ print(2)
+ for {
+ }
+ println() // ERROR "unreachable code"
+ }
+}
+
+func _() int {
+ print(1)
+ select {
+ case <-c:
+ print(2)
+ for {
+ }
+ }
+ println() // ERROR "unreachable code"
+}
+
+func _() int {
+L:
+ print(1)
+ select {
+ case <-c:
+ print(2)
+ panic("abc")
+ println() // ERROR "unreachable code"
+ case c <- 1:
+ print(2)
+ goto L
+ println() // ERROR "unreachable code"
+ }
+}
+
+func _() int {
+L:
+ print(1)
+ select {
+ case <-c:
+ print(2)
+ panic("abc")
+ case c <- 1:
+ print(2)
+ goto L
+ }
+ println() // ERROR "unreachable code"
+}
+
+func _() int {
+ print(1)
+ select {
+ case <-c:
+ print(2)
+ panic("abc")
+ println() // ERROR "unreachable code"
+ default:
+ select {}
+ println() // ERROR "unreachable code"
+ }
+}
+
+func _() int {
+ print(1)
+ select {
+ case <-c:
+ print(2)
+ panic("abc")
+ default:
+ select {}
+ }
+ println() // ERROR "unreachable code"
+}
+
+func _() int {
+ print(1)
+ select {
+ case <-c:
+ print(2)
+ }
+ println() // ok
+}
+
+func _() int {
+L:
+ print(1)
+ select {
+ case <-c:
+ print(2)
+ panic("abc")
+ goto L // ERROR "unreachable code"
+ case c <- 1:
+ print(2)
+ }
+ println() // ok
+}
+
+func _() int {
+ print(1)
+ select {
+ case <-c:
+ print(2)
+ panic("abc")
+ default:
+ print(2)
+ }
+ println() // ok
+}
+
+func _() int {
+ print(1)
+ select {
+ default:
+ break
+ }
+ println() // ok
+}
+
+func _() int {
+ print(1)
+ select {
+ case <-c:
+ print(2)
+ panic("abc")
+ break // ERROR "unreachable code"
+ }
+ println() // ok
+}
+
+func _() int {
+ print(1)
+L:
+ select {
+ case <-c:
+ print(2)
+ for {
+ break L
+ }
+ }
+ println() // ok
+}
+
+func _() int {
+ print(1)
+L:
+ select {
+ case <-c:
+ print(2)
+ panic("abc")
+ case c <- 1:
+ print(2)
+ break L
+ }
+ println() // ok
+}
+
+func _() int {
+ print(1)
+ select {
+ case <-c:
+ print(1)
+ panic("abc")
+ default:
+ select {}
+ break // ERROR "unreachable code"
+ }
+ println() // ok
+}
+
+func _() int {
+ print(1)
+ switch x {
+ case 1:
+ print(2)
+ panic(3)
+ println() // ERROR "unreachable code"
+ default:
+ return 4
+ println() // ERROR "unreachable code"
+ }
+}
+
+func _() int {
+ print(1)
+ switch x {
+ case 1:
+ print(2)
+ panic(3)
+ default:
+ return 4
+ }
+ println() // ERROR "unreachable code"
+}
+
+func _() int {
+ print(1)
+ switch x {
+ default:
+ return 4
+ println() // ERROR "unreachable code"
+ case 1:
+ print(2)
+ panic(3)
+ println() // ERROR "unreachable code"
+ }
+}
+
+func _() int {
+ print(1)
+ switch x {
+ default:
+ return 4
+ case 1:
+ print(2)
+ panic(3)
+ }
+ println() // ERROR "unreachable code"
+}
+
+func _() int {
+ print(1)
+ switch x {
+ case 1:
+ print(2)
+ fallthrough
+ default:
+ return 4
+ println() // ERROR "unreachable code"
+ }
+}
+
+func _() int {
+ print(1)
+ switch x {
+ case 1:
+ print(2)
+ fallthrough
+ default:
+ return 4
+ }
+ println() // ERROR "unreachable code"
+}
+
+func _() int {
+ print(1)
+ switch {
+ }
+ println() // ok
+}
+
+func _() int {
+ print(1)
+ switch x {
+ case 1:
+ print(2)
+ panic(3)
+ case 2:
+ return 4
+ }
+ println() // ok
+}
+
+func _() int {
+ print(1)
+ switch x {
+ case 2:
+ return 4
+ case 1:
+ print(2)
+ panic(3)
+ }
+ println() // ok
+}
+
+func _() int {
+ print(1)
+ switch x {
+ case 1:
+ print(2)
+ fallthrough
+ case 2:
+ return 4
+ }
+ println() // ok
+}
+
+func _() int {
+ print(1)
+ switch x {
+ case 1:
+ print(2)
+ panic(3)
+ }
+ println() // ok
+}
+
+func _() int {
+ print(1)
+L:
+ switch x {
+ case 1:
+ print(2)
+ panic(3)
+ break L // ERROR "unreachable code"
+ default:
+ return 4
+ }
+ println() // ok
+}
+
+func _() int {
+ print(1)
+ switch x {
+ default:
+ return 4
+ break // ERROR "unreachable code"
+ case 1:
+ print(2)
+ panic(3)
+ }
+ println() // ok
+}
+
+func _() int {
+ print(1)
+L:
+ switch x {
+ case 1:
+ print(2)
+ for {
+ break L
+ }
+ default:
+ return 4
+ }
+ println() // ok
+}
+
+func _() int {
+ print(1)
+ switch x.(type) {
+ case int:
+ print(2)
+ panic(3)
+ println() // ERROR "unreachable code"
+ default:
+ return 4
+ println() // ERROR "unreachable code"
+ }
+}
+
+func _() int {
+ print(1)
+ switch x.(type) {
+ case int:
+ print(2)
+ panic(3)
+ default:
+ return 4
+ }
+ println() // ERROR "unreachable code"
+}
+
+func _() int {
+ print(1)
+ switch x.(type) {
+ default:
+ return 4
+ println() // ERROR "unreachable code"
+ case int:
+ print(2)
+ panic(3)
+ println() // ERROR "unreachable code"
+ }
+}
+
+func _() int {
+ print(1)
+ switch x.(type) {
+ default:
+ return 4
+ case int:
+ print(2)
+ panic(3)
+ }
+ println() // ERROR "unreachable code"
+}
+
+func _() int {
+ print(1)
+ switch x.(type) {
+ case int:
+ print(2)
+ fallthrough
+ default:
+ return 4
+ println() // ERROR "unreachable code"
+ }
+}
+
+func _() int {
+ print(1)
+ switch x.(type) {
+ case int:
+ print(2)
+ fallthrough
+ default:
+ return 4
+ }
+ println() // ERROR "unreachable code"
+}
+
+func _() int {
+ print(1)
+ switch {
+ }
+ println() // ok
+}
+
+func _() int {
+ print(1)
+ switch x.(type) {
+ case int:
+ print(2)
+ panic(3)
+ case float64:
+ return 4
+ }
+ println() // ok
+}
+
+func _() int {
+ print(1)
+ switch x.(type) {
+ case float64:
+ return 4
+ case int:
+ print(2)
+ panic(3)
+ }
+ println() // ok
+}
+
+func _() int {
+ print(1)
+ switch x.(type) {
+ case int:
+ print(2)
+ fallthrough
+ case float64:
+ return 4
+ }
+ println() // ok
+}
+
+func _() int {
+ print(1)
+ switch x.(type) {
+ case int:
+ print(2)
+ panic(3)
+ }
+ println() // ok
+}
+
+func _() int {
+ print(1)
+L:
+ switch x.(type) {
+ case int:
+ print(2)
+ panic(3)
+ break L // ERROR "unreachable code"
+ default:
+ return 4
+ }
+ println() // ok
+}
+
+func _() int {
+ print(1)
+ switch x.(type) {
+ default:
+ return 4
+ break // ERROR "unreachable code"
+ case int:
+ print(2)
+ panic(3)
+ }
+ println() // ok
+}
+
+func _() int {
+ print(1)
+L:
+ switch x.(type) {
+ case int:
+ print(2)
+ for {
+ break L
+ }
+ default:
+ return 4
+ }
+ println() // ok
+}
+
+// again, but without the leading print(1).
+// testing that everything works when the terminating statement is first.
+
+func _() int {
+ println() // ok
+}
+
+func _() int {
+ return 2
+ println() // ERROR "unreachable code"
+}
+
+func _() int {
+L:
+ goto L
+ println() // ERROR "unreachable code"
+}
+
+func _() int {
+ panic(2)
+ println() // ERROR "unreachable code"
+}
+
+// but only builtin panic
+func _() int {
+ var panic = func(int) {}
+ panic(2)
+ println() // ok
+}
+
+func _() int {
+ {
+ return 2
+ println() // ERROR "unreachable code"
+ }
+}
+
+func _() int {
+ {
+ return 2
+ }
+ println() // ERROR "unreachable code"
+}
+
+func _() int {
+L:
+ {
+ goto L
+ println() // ERROR "unreachable code"
+ }
+}
+
+func _() int {
+L:
+ {
+ goto L
+ }
+ println() // ERROR "unreachable code"
+}
+
+func _() int {
+ {
+ panic(2)
+ println() // ERROR "unreachable code"
+ }
+}
+
+func _() int {
+ {
+ panic(2)
+ }
+ println() // ERROR "unreachable code"
+}
+
+func _() int {
+ return 2
+ { // ERROR "unreachable code"
+ }
+ println() // ok
+}
+
+func _() int {
+L:
+ goto L
+ { // ERROR "unreachable code"
+ }
+ println() // ok
+}
+
+func _() int {
+ panic(2)
+ { // ERROR "unreachable code"
+ }
+ println() // ok
+}
+
+func _() int {
+ {
+ return 2
+ { // ERROR "unreachable code"
+ }
+ }
+ println() // ok
+}
+
+func _() int {
+L:
+ {
+ goto L
+ { // ERROR "unreachable code"
+ }
+ }
+ println() // ok
+}
+
+func _() int {
+ {
+ panic(2)
+ { // ERROR "unreachable code"
+ }
+ }
+ println() // ok
+}
+
+func _() int {
+ {
+ return 2
+ }
+ { // ERROR "unreachable code"
+ }
+ println() // ok
+}
+
+func _() int {
+L:
+ {
+ goto L
+ }
+ { // ERROR "unreachable code"
+ }
+ println() // ok
+}
+
+func _() int {
+ {
+ panic(2)
+ }
+ { // ERROR "unreachable code"
+ }
+ println() // ok
+}
+
+// again, with func literals
+
+var _ = func() int {
+}
+
+var _ = func() int {
+ print(1)
+}
+
+var _ = func() int {
+ print(1)
+ return 2
+ println() // ERROR "unreachable code"
+}
+
+var _ = func() int {
+L:
+ print(1)
+ goto L
+ println() // ERROR "unreachable code"
+}
+
+var _ = func() int {
+ print(1)
+ panic(2)
+ println() // ERROR "unreachable code"
+}
+
+// but only builtin panic
+var _ = func() int {
+ var panic = func(int) {}
+ print(1)
+ panic(2)
+ println() // ok
+}
+
+var _ = func() int {
+ {
+ print(1)
+ return 2
+ println() // ERROR "unreachable code"
+ }
+ println() // ok
+}
+
+var _ = func() int {
+ {
+ print(1)
+ return 2
+ }
+ println() // ERROR "unreachable code"
+}
+
+var _ = func() int {
+L:
+ {
+ print(1)
+ goto L
+ println() // ERROR "unreachable code"
+ }
+ println() // ok
+}
+
+var _ = func() int {
+L:
+ {
+ print(1)
+ goto L
+ }
+ println() // ERROR "unreachable code"
+}
+
+var _ = func() int {
+ print(1)
+ {
+ panic(2)
+ }
+}
+
+var _ = func() int {
+ print(1)
+ {
+ panic(2)
+ println() // ERROR "unreachable code"
+ }
+}
+
+var _ = func() int {
+ print(1)
+ {
+ panic(2)
+ }
+ println() // ERROR "unreachable code"
+}
+
+var _ = func() int {
+ print(1)
+ return 2
+ { // ERROR "unreachable code"
+ }
+}
+
+var _ = func() int {
+L:
+ print(1)
+ goto L
+ { // ERROR "unreachable code"
+ }
+}
+
+var _ = func() int {
+ print(1)
+ panic(2)
+ { // ERROR "unreachable code"
+ }
+}
+
+var _ = func() int {
+ {
+ print(1)
+ return 2
+ { // ERROR "unreachable code"
+ }
+ }
+}
+
+var _ = func() int {
+L:
+ {
+ print(1)
+ goto L
+ { // ERROR "unreachable code"
+ }
+ }
+}
+
+var _ = func() int {
+ print(1)
+ {
+ panic(2)
+ { // ERROR "unreachable code"
+ }
+ }
+}
+
+var _ = func() int {
+ {
+ print(1)
+ return 2
+ }
+ { // ERROR "unreachable code"
+ }
+}
+
+var _ = func() int {
+L:
+ {
+ print(1)
+ goto L
+ }
+ { // ERROR "unreachable code"
+ }
+}
+
+var _ = func() int {
+ print(1)
+ {
+ panic(2)
+ }
+ { // ERROR "unreachable code"
+ }
+}
+
+var _ = func() int {
+ print(1)
+ if x == nil {
+ panic(2)
+ } else {
+ panic(3)
+ }
+ println() // ERROR "unreachable code"
+}
+
+var _ = func() int {
+L:
+ print(1)
+ if x == nil {
+ panic(2)
+ } else {
+ goto L
+ }
+ println() // ERROR "unreachable code"
+}
+
+var _ = func() int {
+L:
+ print(1)
+ if x == nil {
+ panic(2)
+ } else if x == 1 {
+ return 0
+ } else if x != 2 {
+ panic(3)
+ } else {
+ goto L
+ }
+ println() // ERROR "unreachable code"
+}
+
+// if-else chain missing final else is not okay, even if the
+// conditions cover every possible case.
+
+var _ = func() int {
+ print(1)
+ if x == nil {
+ panic(2)
+ } else if x != nil {
+ panic(3)
+ }
+ println() // ok
+}
+
+var _ = func() int {
+ print(1)
+ if x == nil {
+ panic(2)
+ }
+ println() // ok
+}
+
+var _ = func() int {
+L:
+ print(1)
+ if x == nil {
+ panic(2)
+ } else if x == 1 {
+ return 0
+ } else if x != 1 {
+ panic(3)
+ }
+ println() // ok
+}
+
+var _ = func() int {
+ print(1)
+ for {
+ }
+ println() // ERROR "unreachable code"
+}
+
+var _ = func() int {
+ for {
+ for {
+ break
+ }
+ }
+ println() // ERROR "unreachable code"
+}
+
+var _ = func() int {
+ for {
+ for {
+ break
+ println() // ERROR "unreachable code"
+ }
+ }
+}
+
+var _ = func() int {
+ for {
+ for {
+ continue
+ println() // ERROR "unreachable code"
+ }
+ }
+}
+
+var _ = func() int {
+ for {
+ L:
+ for {
+ break L
+ }
+ }
+ println() // ERROR "unreachable code"
+}
+
+var _ = func() int {
+ print(1)
+ for {
+ break
+ }
+ println() // ok
+}
+
+var _ = func() int {
+ for {
+ for {
+ }
+ break // ERROR "unreachable code"
+ }
+ println() // ok
+}
+
+var _ = func() int {
+L:
+ for {
+ for {
+ break L
+ }
+ }
+ println() // ok
+}
+
+var _ = func() int {
+ print(1)
+ for x == nil {
+ }
+ println() // ok
+}
+
+var _ = func() int {
+ for x == nil {
+ for {
+ break
+ }
+ }
+ println() // ok
+}
+
+var _ = func() int {
+ for x == nil {
+ L:
+ for {
+ break L
+ }
+ }
+ println() // ok
+}
+
+var _ = func() int {
+ print(1)
+ for true {
+ }
+ println() // ok
+}
+
+var _ = func() int {
+ for true {
+ for {
+ break
+ }
+ }
+ println() // ok
+}
+
+var _ = func() int {
+ for true {
+ L:
+ for {
+ break L
+ }
+ }
+ println() // ok
+}
+
+var _ = func() int {
+ print(1)
+ select {}
+ println() // ERROR "unreachable code"
+}
+
+var _ = func() int {
+ print(1)
+ select {
+ case <-c:
+ print(2)
+ panic("abc")
+ println() // ERROR "unreachable code"
+ }
+}
+
+var _ = func() int {
+ print(1)
+ select {
+ case <-c:
+ print(2)
+ panic("abc")
+ }
+ println() // ERROR "unreachable code"
+}
+
+var _ = func() int {
+ print(1)
+ select {
+ case <-c:
+ print(2)
+ for {
+ }
+ println() // ERROR "unreachable code"
+ }
+}
+
+var _ = func() int {
+ print(1)
+ select {
+ case <-c:
+ print(2)
+ for {
+ }
+ }
+ println() // ERROR "unreachable code"
+}
+
+var _ = func() int {
+L:
+ print(1)
+ select {
+ case <-c:
+ print(2)
+ panic("abc")
+ println() // ERROR "unreachable code"
+ case c <- 1:
+ print(2)
+ goto L
+ println() // ERROR "unreachable code"
+ }
+}
+
+var _ = func() int {
+L:
+ print(1)
+ select {
+ case <-c:
+ print(2)
+ panic("abc")
+ case c <- 1:
+ print(2)
+ goto L
+ }
+ println() // ERROR "unreachable code"
+}
+
+var _ = func() int {
+ print(1)
+ select {
+ case <-c:
+ print(2)
+ panic("abc")
+ println() // ERROR "unreachable code"
+ default:
+ select {}
+ println() // ERROR "unreachable code"
+ }
+}
+
+var _ = func() int {
+ print(1)
+ select {
+ case <-c:
+ print(2)
+ panic("abc")
+ default:
+ select {}
+ }
+ println() // ERROR "unreachable code"
+}
+
+var _ = func() int {
+ print(1)
+ select {
+ case <-c:
+ print(2)
+ }
+ println() // ok
+}
+
+var _ = func() int {
+L:
+ print(1)
+ select {
+ case <-c:
+ print(2)
+ panic("abc")
+ goto L // ERROR "unreachable code"
+ case c <- 1:
+ print(2)
+ }
+ println() // ok
+}
+
+var _ = func() int {
+ print(1)
+ select {
+ case <-c:
+ print(2)
+ panic("abc")
+ default:
+ print(2)
+ }
+ println() // ok
+}
+
+var _ = func() int {
+ print(1)
+ select {
+ default:
+ break
+ }
+ println() // ok
+}
+
+var _ = func() int {
+ print(1)
+ select {
+ case <-c:
+ print(2)
+ panic("abc")
+ break // ERROR "unreachable code"
+ }
+ println() // ok
+}
+
+var _ = func() int {
+ print(1)
+L:
+ select {
+ case <-c:
+ print(2)
+ for {
+ break L
+ }
+ }
+ println() // ok
+}
+
+var _ = func() int {
+ print(1)
+L:
+ select {
+ case <-c:
+ print(2)
+ panic("abc")
+ case c <- 1:
+ print(2)
+ break L
+ }
+ println() // ok
+}
+
+var _ = func() int {
+ print(1)
+ select {
+ case <-c:
+ print(1)
+ panic("abc")
+ default:
+ select {}
+ break // ERROR "unreachable code"
+ }
+ println() // ok
+}
+
+var _ = func() int {
+ print(1)
+ switch x {
+ case 1:
+ print(2)
+ panic(3)
+ println() // ERROR "unreachable code"
+ default:
+ return 4
+ println() // ERROR "unreachable code"
+ }
+}
+
+var _ = func() int {
+ print(1)
+ switch x {
+ case 1:
+ print(2)
+ panic(3)
+ default:
+ return 4
+ }
+ println() // ERROR "unreachable code"
+}
+
+var _ = func() int {
+ print(1)
+ switch x {
+ default:
+ return 4
+ println() // ERROR "unreachable code"
+ case 1:
+ print(2)
+ panic(3)
+ println() // ERROR "unreachable code"
+ }
+}
+
+var _ = func() int {
+ print(1)
+ switch x {
+ default:
+ return 4
+ case 1:
+ print(2)
+ panic(3)
+ }
+ println() // ERROR "unreachable code"
+}
+
+var _ = func() int {
+ print(1)
+ switch x {
+ case 1:
+ print(2)
+ fallthrough
+ default:
+ return 4
+ println() // ERROR "unreachable code"
+ }
+}
+
+var _ = func() int {
+ print(1)
+ switch x {
+ case 1:
+ print(2)
+ fallthrough
+ default:
+ return 4
+ }
+ println() // ERROR "unreachable code"
+}
+
+var _ = func() int {
+ print(1)
+ switch {
+ }
+ println() // ok
+}
+
+var _ = func() int {
+ print(1)
+ switch x {
+ case 1:
+ print(2)
+ panic(3)
+ case 2:
+ return 4
+ }
+ println() // ok
+}
+
+var _ = func() int {
+ print(1)
+ switch x {
+ case 2:
+ return 4
+ case 1:
+ print(2)
+ panic(3)
+ }
+ println() // ok
+}
+
+var _ = func() int {
+ print(1)
+ switch x {
+ case 1:
+ print(2)
+ fallthrough
+ case 2:
+ return 4
+ }
+ println() // ok
+}
+
+var _ = func() int {
+ print(1)
+ switch x {
+ case 1:
+ print(2)
+ panic(3)
+ }
+ println() // ok
+}
+
+var _ = func() int {
+ print(1)
+L:
+ switch x {
+ case 1:
+ print(2)
+ panic(3)
+ break L // ERROR "unreachable code"
+ default:
+ return 4
+ }
+ println() // ok
+}
+
+var _ = func() int {
+ print(1)
+ switch x {
+ default:
+ return 4
+ break // ERROR "unreachable code"
+ case 1:
+ print(2)
+ panic(3)
+ }
+ println() // ok
+}
+
+var _ = func() int {
+ print(1)
+L:
+ switch x {
+ case 1:
+ print(2)
+ for {
+ break L
+ }
+ default:
+ return 4
+ }
+ println() // ok
+}
+
+var _ = func() int {
+ print(1)
+ switch x.(type) {
+ case int:
+ print(2)
+ panic(3)
+ println() // ERROR "unreachable code"
+ default:
+ return 4
+ println() // ERROR "unreachable code"
+ }
+}
+
+var _ = func() int {
+ print(1)
+ switch x.(type) {
+ case int:
+ print(2)
+ panic(3)
+ default:
+ return 4
+ }
+ println() // ERROR "unreachable code"
+}
+
+var _ = func() int {
+ print(1)
+ switch x.(type) {
+ default:
+ return 4
+ println() // ERROR "unreachable code"
+ case int:
+ print(2)
+ panic(3)
+ println() // ERROR "unreachable code"
+ }
+}
+
+var _ = func() int {
+ print(1)
+ switch x.(type) {
+ default:
+ return 4
+ case int:
+ print(2)
+ panic(3)
+ }
+ println() // ERROR "unreachable code"
+}
+
+var _ = func() int {
+ print(1)
+ switch x.(type) {
+ case int:
+ print(2)
+ fallthrough
+ default:
+ return 4
+ println() // ERROR "unreachable code"
+ }
+}
+
+var _ = func() int {
+ print(1)
+ switch x.(type) {
+ case int:
+ print(2)
+ fallthrough
+ default:
+ return 4
+ }
+ println() // ERROR "unreachable code"
+}
+
+var _ = func() int {
+ print(1)
+ switch {
+ }
+ println() // ok
+}
+
+var _ = func() int {
+ print(1)
+ switch x.(type) {
+ case int:
+ print(2)
+ panic(3)
+ case float64:
+ return 4
+ }
+ println() // ok
+}
+
+var _ = func() int {
+ print(1)
+ switch x.(type) {
+ case float64:
+ return 4
+ case int:
+ print(2)
+ panic(3)
+ }
+ println() // ok
+}
+
+var _ = func() int {
+ print(1)
+ switch x.(type) {
+ case int:
+ print(2)
+ fallthrough
+ case float64:
+ return 4
+ }
+ println() // ok
+}
+
+var _ = func() int {
+ print(1)
+ switch x.(type) {
+ case int:
+ print(2)
+ panic(3)
+ }
+ println() // ok
+}
+
+var _ = func() int {
+ print(1)
+L:
+ switch x.(type) {
+ case int:
+ print(2)
+ panic(3)
+ break L // ERROR "unreachable code"
+ default:
+ return 4
+ }
+ println() // ok
+}
+
+var _ = func() int {
+ print(1)
+ switch x.(type) {
+ default:
+ return 4
+ break // ERROR "unreachable code"
+ case int:
+ print(2)
+ panic(3)
+ }
+ println() // ok
+}
+
+var _ = func() int {
+ print(1)
+L:
+ switch x.(type) {
+ case int:
+ print(2)
+ for {
+ break L
+ }
+ default:
+ return 4
+ }
+ println() // ok
+}
+
+// again, but without the leading print(1).
+// testing that everything works when the terminating statement is first.
+
+var _ = func() int {
+ println() // ok
+}
+
+var _ = func() int {
+ return 2
+ println() // ERROR "unreachable code"
+}
+
+var _ = func() int {
+L:
+ goto L
+ println() // ERROR "unreachable code"
+}
+
+var _ = func() int {
+ panic(2)
+ println() // ERROR "unreachable code"
+}
+
+// but only builtin panic
+var _ = func() int {
+ var panic = func(int) {}
+ panic(2)
+ println() // ok
+}
+
+var _ = func() int {
+ {
+ return 2
+ println() // ERROR "unreachable code"
+ }
+}
+
+var _ = func() int {
+ {
+ return 2
+ }
+ println() // ERROR "unreachable code"
+}
+
+var _ = func() int {
+L:
+ {
+ goto L
+ println() // ERROR "unreachable code"
+ }
+}
+
+var _ = func() int {
+L:
+ {
+ goto L
+ }
+ println() // ERROR "unreachable code"
+}
+
+var _ = func() int {
+ {
+ panic(2)
+ println() // ERROR "unreachable code"
+ }
+}
+
+var _ = func() int {
+ {
+ panic(2)
+ }
+ println() // ERROR "unreachable code"
+}
+
+var _ = func() int {
+ return 2
+ { // ERROR "unreachable code"
+ }
+ println() // ok
+}
+
+var _ = func() int {
+L:
+ goto L
+ { // ERROR "unreachable code"
+ }
+ println() // ok
+}
+
+var _ = func() int {
+ panic(2)
+ { // ERROR "unreachable code"
+ }
+ println() // ok
+}
+
+var _ = func() int {
+ {
+ return 2
+ { // ERROR "unreachable code"
+ }
+ }
+ println() // ok
+}
+
+var _ = func() int {
+L:
+ {
+ goto L
+ { // ERROR "unreachable code"
+ }
+ }
+ println() // ok
+}
+
+var _ = func() int {
+ {
+ panic(2)
+ { // ERROR "unreachable code"
+ }
+ }
+ println() // ok
+}
+
+var _ = func() int {
+ {
+ return 2
+ }
+ { // ERROR "unreachable code"
+ }
+ println() // ok
+}
+
+var _ = func() int {
+L:
+ {
+ goto L
+ }
+ { // ERROR "unreachable code"
+ }
+ println() // ok
+}
+
+var _ = func() int {
+ {
+ panic(2)
+ }
+ { // ERROR "unreachable code"
+ }
+ println() // ok
+}
+
+var _ = func() {
+ // goto without label used to panic
+ goto
+}
diff --git a/llgo/third_party/go.tools/cmd/vet/testdata/method.go b/llgo/third_party/go.tools/cmd/vet/testdata/method.go
new file mode 100644
index 0000000000000000000000000000000000000000..52b500df272803645f904ae22e9323117bdc83e6
--- /dev/null
+++ b/llgo/third_party/go.tools/cmd/vet/testdata/method.go
@@ -0,0 +1,22 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains tests for the canonical method checker.
+
+// This file contains the code to check canonical methods.
+
+package testdata
+
+import (
+ "fmt"
+)
+
+type MethodTest int
+
+func (t *MethodTest) Scan(x fmt.ScanState, c byte) { // ERROR "should have signature Scan"
+}
+
+type MethodTestInterface interface {
+ ReadByte() byte // ERROR "should have signature ReadByte"
+}
diff --git a/llgo/third_party/go.tools/cmd/vet/testdata/nilfunc.go b/llgo/third_party/go.tools/cmd/vet/testdata/nilfunc.go
new file mode 100644
index 0000000000000000000000000000000000000000..2ce7bc8ca82eccc641e7d6879ac7dcdb02f5399d
--- /dev/null
+++ b/llgo/third_party/go.tools/cmd/vet/testdata/nilfunc.go
@@ -0,0 +1,35 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package testdata
+
+func F() {}
+
+type T struct {
+ F func()
+}
+
+func (T) M() {}
+
+var Fv = F
+
+func Comparison() {
+ var t T
+ var fn func()
+ if fn == nil || Fv == nil || t.F == nil {
+ // no error; these func vars or fields may be nil
+ }
+ if F == nil { // ERROR "comparison of function F == nil is always false"
+ panic("can't happen")
+ }
+ if t.M == nil { // ERROR "comparison of function M == nil is always false"
+ panic("can't happen")
+ }
+ if F != nil { // ERROR "comparison of function F != nil is always true"
+ if t.M != nil { // ERROR "comparison of function M != nil is always true"
+ return
+ }
+ }
+ panic("can't happen")
+}
diff --git a/llgo/third_party/go.tools/cmd/vet/testdata/print.go b/llgo/third_party/go.tools/cmd/vet/testdata/print.go
new file mode 100644
index 0000000000000000000000000000000000000000..3875ac506917a60a5ca476d0e1d7688a525f61c8
--- /dev/null
+++ b/llgo/third_party/go.tools/cmd/vet/testdata/print.go
@@ -0,0 +1,340 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains tests for the printf checker.
+
+package testdata
+
+import (
+ "fmt"
+ "math"
+ "os"
+ "unsafe" // just for test case printing unsafe.Pointer
+)
+
+func UnsafePointerPrintfTest() {
+ var up unsafe.Pointer
+ fmt.Printf("%p, %x %X", up, up, up)
+}
+
+// Error methods that do not satisfy the Error interface and should be checked.
+type errorTest1 int
+
+func (errorTest1) Error(...interface{}) string {
+ return "hi"
+}
+
+type errorTest2 int // Analogous to testing's *T type.
+func (errorTest2) Error(...interface{}) {
+}
+
+type errorTest3 int
+
+func (errorTest3) Error() { // No return value.
+}
+
+type errorTest4 int
+
+func (errorTest4) Error() int { // Different return type.
+ return 3
+}
+
+type errorTest5 int
+
+func (errorTest5) error() { // niladic; don't complain if no args (was bug)
+}
+
+// This function never executes, but it serves as a simple test for the program.
+// Test with make test.
+func PrintfTests() {
+ var b bool
+ var i int
+ var r rune
+ var s string
+ var x float64
+ var p *int
+ var imap map[int]int
+ var fslice []float64
+ var c complex64
+ // Some good format/argtypes
+ fmt.Printf("")
+ fmt.Printf("%b %b %b", 3, i, x)
+ fmt.Printf("%c %c %c %c", 3, i, 'x', r)
+ fmt.Printf("%d %d %d", 3, i, imap)
+ fmt.Printf("%e %e %e %e", 3e9, x, fslice, c)
+ fmt.Printf("%E %E %E %E", 3e9, x, fslice, c)
+ fmt.Printf("%f %f %f %f", 3e9, x, fslice, c)
+ fmt.Printf("%F %F %F %F", 3e9, x, fslice, c)
+ fmt.Printf("%g %g %g %g", 3e9, x, fslice, c)
+ fmt.Printf("%G %G %G %G", 3e9, x, fslice, c)
+ fmt.Printf("%b %b %b %b", 3e9, x, fslice, c)
+ fmt.Printf("%o %o", 3, i)
+ fmt.Printf("%p %p", p, nil)
+ fmt.Printf("%q %q %q %q", 3, i, 'x', r)
+ fmt.Printf("%s %s %s", "hi", s, []byte{65})
+ fmt.Printf("%t %t", true, b)
+ fmt.Printf("%T %T", 3, i)
+ fmt.Printf("%U %U", 3, i)
+ fmt.Printf("%v %v", 3, i)
+ fmt.Printf("%x %x %x %x", 3, i, "hi", s)
+ fmt.Printf("%X %X %X %X", 3, i, "hi", s)
+ fmt.Printf("%.*s %d %g", 3, "hi", 23, 2.3)
+ fmt.Printf("%s", &stringerv)
+ fmt.Printf("%v", &stringerv)
+ fmt.Printf("%T", &stringerv)
+ fmt.Printf("%v", notstringerv)
+ fmt.Printf("%T", notstringerv)
+ fmt.Printf("%q", stringerarrayv)
+ fmt.Printf("%v", stringerarrayv)
+ fmt.Printf("%s", stringerarrayv)
+ fmt.Printf("%v", notstringerarrayv)
+ fmt.Printf("%T", notstringerarrayv)
+ fmt.Printf("%d", new(Formatter))
+ fmt.Printf("%*%", 2) // Ridiculous but allowed.
+ fmt.Printf("%s", interface{}(nil)) // Nothing useful we can say.
+
+ fmt.Printf("%g", 1+2i)
+ // Some bad format/argTypes
+ fmt.Printf("%b", "hi") // ERROR "arg .hi. for printf verb %b of wrong type"
+ fmt.Printf("%t", c) // ERROR "arg c for printf verb %t of wrong type"
+ fmt.Printf("%t", 1+2i) // ERROR "arg 1 \+ 2i for printf verb %t of wrong type"
+ fmt.Printf("%c", 2.3) // ERROR "arg 2.3 for printf verb %c of wrong type"
+ fmt.Printf("%d", 2.3) // ERROR "arg 2.3 for printf verb %d of wrong type"
+ fmt.Printf("%e", "hi") // ERROR "arg .hi. for printf verb %e of wrong type"
+ fmt.Printf("%E", true) // ERROR "arg true for printf verb %E of wrong type"
+ fmt.Printf("%f", "hi") // ERROR "arg .hi. for printf verb %f of wrong type"
+ fmt.Printf("%F", 'x') // ERROR "arg 'x' for printf verb %F of wrong type"
+ fmt.Printf("%g", "hi") // ERROR "arg .hi. for printf verb %g of wrong type"
+ fmt.Printf("%g", imap) // ERROR "arg imap for printf verb %g of wrong type"
+ fmt.Printf("%G", i) // ERROR "arg i for printf verb %G of wrong type"
+ fmt.Printf("%o", x) // ERROR "arg x for printf verb %o of wrong type"
+ fmt.Printf("%p", 23) // ERROR "arg 23 for printf verb %p of wrong type"
+ fmt.Printf("%q", x) // ERROR "arg x for printf verb %q of wrong type"
+ fmt.Printf("%s", b) // ERROR "arg b for printf verb %s of wrong type"
+ fmt.Printf("%s", byte(65)) // ERROR "arg byte\(65\) for printf verb %s of wrong type"
+ fmt.Printf("%t", 23) // ERROR "arg 23 for printf verb %t of wrong type"
+ fmt.Printf("%U", x) // ERROR "arg x for printf verb %U of wrong type"
+ fmt.Printf("%x", nil) // ERROR "arg nil for printf verb %x of wrong type"
+ fmt.Printf("%X", 2.3) // ERROR "arg 2.3 for printf verb %X of wrong type"
+ fmt.Printf("%s", stringerv) // ERROR "arg stringerv for printf verb %s of wrong type"
+ fmt.Printf("%t", stringerv) // ERROR "arg stringerv for printf verb %t of wrong type"
+ fmt.Printf("%q", notstringerv) // ERROR "arg notstringerv for printf verb %q of wrong type"
+ fmt.Printf("%t", notstringerv) // ERROR "arg notstringerv for printf verb %t of wrong type"
+ fmt.Printf("%t", stringerarrayv) // ERROR "arg stringerarrayv for printf verb %t of wrong type"
+ fmt.Printf("%t", notstringerarrayv) // ERROR "arg notstringerarrayv for printf verb %t of wrong type"
+ fmt.Printf("%q", notstringerarrayv) // ERROR "arg notstringerarrayv for printf verb %q of wrong type"
+ fmt.Printf("%d", Formatter(true)) // correct (the type is responsible for formatting)
+ fmt.Printf("%s", nonemptyinterface) // correct (the dynamic type of nonemptyinterface may be a stringer)
+ fmt.Printf("%.*s %d %g", 3, "hi", 23, 'x') // ERROR "arg 'x' for printf verb %g of wrong type"
+ fmt.Println() // not an error
+ fmt.Println("%s", "hi") // ERROR "possible formatting directive in Println call"
+ fmt.Printf("%s", "hi", 3) // ERROR "wrong number of args for format in Printf call"
+ fmt.Sprintf("%"+("s"), "hi", 3) // ERROR "wrong number of args for format in Sprintf call"
+ fmt.Printf("%s%%%d", "hi", 3) // correct
+ fmt.Printf("%08s", "woo") // correct
+ fmt.Printf("% 8s", "woo") // correct
+ fmt.Printf("%.*d", 3, 3) // correct
+ fmt.Printf("%.*d", 3, 3, 3, 3) // ERROR "wrong number of args for format in Printf call.*4 args"
+ fmt.Printf("%.*d", "hi", 3) // ERROR "arg .hi. for \* in printf format not of type int"
+ fmt.Printf("%.*d", i, 3) // correct
+ fmt.Printf("%.*d", s, 3) // ERROR "arg s for \* in printf format not of type int"
+ fmt.Printf("%*%", 0.22) // ERROR "arg 0.22 for \* in printf format not of type int"
+ fmt.Printf("%q %q", multi()...) // ok
+ fmt.Printf("%#q", `blah`) // ok
+ printf("now is the time", "buddy") // ERROR "no formatting directive"
+ Printf("now is the time", "buddy") // ERROR "no formatting directive"
+ Printf("hi") // ok
+ const format = "%s %s\n"
+ Printf(format, "hi", "there")
+ Printf(format, "hi") // ERROR "missing argument for Printf..%s..: format reads arg 2, have only 1"
+ Printf("%s %d %.3v %q", "str", 4) // ERROR "missing argument for Printf..%.3v..: format reads arg 3, have only 2"
+ f := new(stringer)
+ f.Warn(0, "%s", "hello", 3) // ERROR "possible formatting directive in Warn call"
+ f.Warnf(0, "%s", "hello", 3) // ERROR "wrong number of args for format in Warnf call"
+ f.Warnf(0, "%r", "hello") // ERROR "unrecognized printf verb"
+ f.Warnf(0, "%#s", "hello") // ERROR "unrecognized printf flag"
+ Printf("d%", 2) // ERROR "missing verb at end of format string in Printf call"
+ Printf("%d", percentDV)
+ Printf("%d", &percentDV)
+ Printf("%d", notPercentDV) // ERROR "arg notPercentDV for printf verb %d of wrong type"
+ Printf("%d", ¬PercentDV) // ERROR "arg ¬PercentDV for printf verb %d of wrong type"
+ Printf("%p", ¬PercentDV) // Works regardless: we print it as a pointer.
+ Printf("%s", percentSV)
+ Printf("%s", &percentSV)
+ // Good argument reorderings.
+ Printf("%[1]d", 3)
+ Printf("%[1]*d", 3, 1)
+ Printf("%[2]*[1]d", 1, 3)
+ Printf("%[2]*.[1]*[3]d", 2, 3, 4)
+ fmt.Fprintf(os.Stderr, "%[2]*.[1]*[3]d", 2, 3, 4) // Use Fprintf to make sure we count arguments correctly.
+ // Bad argument reorderings.
+ Printf("%[xd", 3) // ERROR "illegal syntax for printf argument index"
+ Printf("%[x]d", 3) // ERROR "illegal syntax for printf argument index"
+ Printf("%[3]*s", "hi", 2) // ERROR "missing argument for Printf.* reads arg 3, have only 2"
+ fmt.Sprintf("%[3]d", 2) // ERROR "missing argument for Sprintf.* reads arg 3, have only 1"
+ Printf("%[2]*.[1]*[3]d", 2, "hi", 4) // ERROR "arg .hi. for \* in printf format not of type int"
+ // Something that satisfies the error interface.
+ var e error
+ fmt.Println(e.Error()) // ok
+ // Something that looks like an error interface but isn't, such as the (*T).Error method
+ // in the testing package.
+ var et1 errorTest1
+ fmt.Println(et1.Error()) // ERROR "no args in Error call"
+ fmt.Println(et1.Error("hi")) // ok
+ fmt.Println(et1.Error("%d", 3)) // ERROR "possible formatting directive in Error call"
+ var et2 errorTest2
+ et2.Error() // ERROR "no args in Error call"
+ et2.Error("hi") // ok, not an error method.
+ et2.Error("%d", 3) // ERROR "possible formatting directive in Error call"
+ var et3 errorTest3
+ et3.Error() // ok, not an error method.
+ var et4 errorTest4
+ et4.Error() // ok, not an error method.
+ var et5 errorTest5
+ et5.error() // ok, not an error method.
+ // Bug: used to recur forever.
+ Printf("%p %x", recursiveStructV, recursiveStructV.next)
+ Printf("%p %x", recursiveStruct1V, recursiveStruct1V.next)
+ Printf("%p %x", recursiveSliceV, recursiveSliceV)
+ Printf("%p %x", recursiveMapV, recursiveMapV)
+ // Special handling for Log.
+ math.Log(3) // OK
+ Log(3) // OK
+ Log("%d", 3) // ERROR "possible formatting directive in Log call"
+ Logf("%d", 3)
+ Logf("%d", "hi") // ERROR "arg .hi. for printf verb %d of wrong type: untyped string"
+
+}
+
+// Printf is used by the test so we must declare it.
+func Printf(format string, args ...interface{}) {
+ panic("don't call - testing only")
+}
+
+// printf is used by the test so we must declare it.
+func printf(format string, args ...interface{}) {
+ panic("don't call - testing only")
+}
+
+// multi is used by the test.
+func multi() []interface{} {
+ panic("don't call - testing only")
+}
+
+type stringer float64
+
+var stringerv stringer
+
+func (*stringer) String() string {
+ return "string"
+}
+
+func (*stringer) Warn(int, ...interface{}) string {
+ return "warn"
+}
+
+func (*stringer) Warnf(int, string, ...interface{}) string {
+ return "warnf"
+}
+
+type notstringer struct {
+ f float64
+}
+
+var notstringerv notstringer
+
+type stringerarray [4]float64
+
+func (stringerarray) String() string {
+ return "string"
+}
+
+var stringerarrayv stringerarray
+
+type notstringerarray [4]float64
+
+var notstringerarrayv notstringerarray
+
+var nonemptyinterface = interface {
+ f()
+}(nil)
+
+// A data type we can print with "%d".
+type percentDStruct struct {
+ a int
+ b []byte
+ c *float64
+}
+
+var percentDV percentDStruct
+
+// A data type we cannot print correctly with "%d".
+type notPercentDStruct struct {
+ a int
+ b []byte
+ c bool
+}
+
+var notPercentDV notPercentDStruct
+
+// A data type we can print with "%s".
+type percentSStruct struct {
+ a string
+ b []byte
+ c stringerarray
+}
+
+var percentSV percentSStruct
+
+type recursiveStringer int
+
+func (s recursiveStringer) String() string {
+ fmt.Sprintf("%d", s)
+ fmt.Sprintf("%#v", s)
+ fmt.Sprintf("%v", s) // ERROR "arg s for printf causes recursive call to String method"
+ fmt.Sprintf("%v", &s) // ERROR "arg &s for printf causes recursive call to String method"
+ fmt.Sprintf("%T", s) // ok; does not recursively call String
+ return fmt.Sprintln(s) // ERROR "arg s for print causes recursive call to String method"
+}
+
+type recursivePtrStringer int
+
+func (p *recursivePtrStringer) String() string {
+ fmt.Sprintf("%v", *p)
+ return fmt.Sprintln(p) // ERROR "arg p for print causes recursive call to String method"
+}
+
+type Formatter bool
+
+func (*Formatter) Format(fmt.State, rune) {
+}
+
+type RecursiveSlice []RecursiveSlice
+
+var recursiveSliceV = &RecursiveSlice{}
+
+type RecursiveMap map[int]RecursiveMap
+
+var recursiveMapV = make(RecursiveMap)
+
+type RecursiveStruct struct {
+ next *RecursiveStruct
+}
+
+var recursiveStructV = &RecursiveStruct{}
+
+type RecursiveStruct1 struct {
+ next *RecursiveStruct2
+}
+
+type RecursiveStruct2 struct {
+ next *RecursiveStruct1
+}
+
+var recursiveStruct1V = &RecursiveStruct1{}
+
+// Fix for issue 7149: Missing return type on String method caused fault.
+func (int) String() {
+ return ""
+}
diff --git a/llgo/third_party/go.tools/cmd/vet/testdata/rangeloop.go b/llgo/third_party/go.tools/cmd/vet/testdata/rangeloop.go
new file mode 100644
index 0000000000000000000000000000000000000000..37b5940ddd2137e94259d7736c0df52ab6c4e070
--- /dev/null
+++ b/llgo/third_party/go.tools/cmd/vet/testdata/rangeloop.go
@@ -0,0 +1,59 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains tests for the rangeloop checker.
+
+package testdata
+
+func RangeLoopTests() {
+ var s []int
+ for i, v := range s {
+ go func() {
+ println(i) // ERROR "range variable i captured by func literal"
+ println(v) // ERROR "range variable v captured by func literal"
+ }()
+ }
+ for i, v := range s {
+ defer func() {
+ println(i) // ERROR "range variable i captured by func literal"
+ println(v) // ERROR "range variable v captured by func literal"
+ }()
+ }
+ for i := range s {
+ go func() {
+ println(i) // ERROR "range variable i captured by func literal"
+ }()
+ }
+ for _, v := range s {
+ go func() {
+ println(v) // ERROR "range variable v captured by func literal"
+ }()
+ }
+ for i, v := range s {
+ go func() {
+ println(i, v)
+ }()
+ println("unfortunately, we don't catch the error above because of this statement")
+ }
+ for i, v := range s {
+ go func(i, v int) {
+ println(i, v)
+ }(i, v)
+ }
+ for i, v := range s {
+ i, v := i, v
+ go func() {
+ println(i, v)
+ }()
+ }
+ // If the key of the range statement is not an identifier
+ // the code should not panic (it used to).
+ var x [2]int
+ var f int
+ for x[0], f = range s {
+ go func() {
+ _ = f // ERROR "range variable f captured by func literal"
+ }()
+ }
+}
diff --git a/llgo/third_party/go.tools/cmd/vet/testdata/shadow.go b/llgo/third_party/go.tools/cmd/vet/testdata/shadow.go
new file mode 100644
index 0000000000000000000000000000000000000000..34a680681be337baa4ab0aa4e96e1338e820c33f
--- /dev/null
+++ b/llgo/third_party/go.tools/cmd/vet/testdata/shadow.go
@@ -0,0 +1,54 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains tests for the shadowed variable checker.
+// Some of these errors are caught by the compiler (shadowed return parameters for example)
+// but are nonetheless useful tests.
+
+package testdata
+
+import "os"
+
+func ShadowRead(f *os.File, buf []byte) (err error) {
+ var x int
+ if f != nil {
+ err := 3 // OK - different type.
+ _ = err
+ }
+ if f != nil {
+ _, err := f.Read(buf) // ERROR "declaration of err shadows declaration at testdata/shadow.go:13"
+ if err != nil {
+ return err
+ }
+ i := 3 // OK
+ _ = i
+ }
+ if f != nil {
+ var _, err = f.Read(buf) // ERROR "declaration of err shadows declaration at testdata/shadow.go:13"
+ if err != nil {
+ return err
+ }
+ }
+ for i := 0; i < 10; i++ {
+ i := i // OK: obviously intentional idiomatic redeclaration
+ go func() {
+ println(i)
+ }()
+ }
+ var shadowTemp interface{}
+ switch shadowTemp := shadowTemp.(type) { // OK: obviously intentional idiomatic redeclaration
+ case int:
+ println("OK")
+ _ = shadowTemp
+ }
+ if shadowTemp := shadowTemp; true { // OK: obviously intentional idiomatic redeclaration
+ var f *os.File // OK because f is not mentioned later in the function.
+ // The declaration of x is a shadow because x is mentioned below.
+ var x int // ERROR "declaration of x shadows declaration at testdata/shadow.go:14"
+ _, _, _ = x, f, shadowTemp
+ }
+ // Use a couple of variables to trigger shadowing errors.
+ _, _ = err, x
+ return
+}
diff --git a/llgo/third_party/go.tools/cmd/vet/testdata/shift.go b/llgo/third_party/go.tools/cmd/vet/testdata/shift.go
new file mode 100644
index 0000000000000000000000000000000000000000..6624f09cc1617bd19a4991104f945a28428574bc
--- /dev/null
+++ b/llgo/third_party/go.tools/cmd/vet/testdata/shift.go
@@ -0,0 +1,78 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains tests for the suspicious shift checker.
+
+package testdata
+
+func ShiftTest() {
+ var i8 int8
+ _ = i8 << 7
+ _ = (i8 + 1) << 8 // ERROR "\(i8 \+ 1\) too small for shift of 8"
+ _ = i8 << (7 + 1) // ERROR "i8 too small for shift of 8"
+ _ = i8 >> 8 // ERROR "i8 too small for shift of 8"
+ i8 <<= 8 // ERROR "i8 too small for shift of 8"
+ i8 >>= 8 // ERROR "i8 too small for shift of 8"
+ var i16 int16
+ _ = i16 << 15
+ _ = i16 << 16 // ERROR "i16 too small for shift of 16"
+ _ = i16 >> 16 // ERROR "i16 too small for shift of 16"
+ i16 <<= 16 // ERROR "i16 too small for shift of 16"
+ i16 >>= 16 // ERROR "i16 too small for shift of 16"
+ var i32 int32
+ _ = i32 << 31
+ _ = i32 << 32 // ERROR "i32 too small for shift of 32"
+ _ = i32 >> 32 // ERROR "i32 too small for shift of 32"
+ i32 <<= 32 // ERROR "i32 too small for shift of 32"
+ i32 >>= 32 // ERROR "i32 too small for shift of 32"
+ var i64 int64
+ _ = i64 << 63
+ _ = i64 << 64 // ERROR "i64 too small for shift of 64"
+ _ = i64 >> 64 // ERROR "i64 too small for shift of 64"
+ i64 <<= 64 // ERROR "i64 too small for shift of 64"
+ i64 >>= 64 // ERROR "i64 too small for shift of 64"
+ var u8 uint8
+ _ = u8 << 7
+ _ = u8 << 8 // ERROR "u8 too small for shift of 8"
+ _ = u8 >> 8 // ERROR "u8 too small for shift of 8"
+ u8 <<= 8 // ERROR "u8 too small for shift of 8"
+ u8 >>= 8 // ERROR "u8 too small for shift of 8"
+ var u16 uint16
+ _ = u16 << 15
+ _ = u16 << 16 // ERROR "u16 too small for shift of 16"
+ _ = u16 >> 16 // ERROR "u16 too small for shift of 16"
+ u16 <<= 16 // ERROR "u16 too small for shift of 16"
+ u16 >>= 16 // ERROR "u16 too small for shift of 16"
+ var u32 uint32
+ _ = u32 << 31
+ _ = u32 << 32 // ERROR "u32 too small for shift of 32"
+ _ = u32 >> 32 // ERROR "u32 too small for shift of 32"
+ u32 <<= 32 // ERROR "u32 too small for shift of 32"
+ u32 >>= 32 // ERROR "u32 too small for shift of 32"
+ var u64 uint64
+ _ = u64 << 63
+ _ = u64 << 64 // ERROR "u64 too small for shift of 64"
+ _ = u64 >> 64 // ERROR "u64 too small for shift of 64"
+ u64 <<= 64 // ERROR "u64 too small for shift of 64"
+ u64 >>= 64 // ERROR "u64 too small for shift of 64"
+ _ = u64 << u64 // Non-constant shifts should succeed.
+ var i int
+ _ = i << 31
+ _ = i << 32 // ERROR "i might be too small for shift of 32"
+ _ = i >> 32 // ERROR "i might be too small for shift of 32"
+ i <<= 32 // ERROR "i might be too small for shift of 32"
+ i >>= 32 // ERROR "i might be too small for shift of 32"
+ var u uint
+ _ = u << 31
+ _ = u << 32 // ERROR "u might be too small for shift of 32"
+ _ = u >> 32 // ERROR "u might be too small for shift of 32"
+ u <<= 32 // ERROR "u might be too small for shift of 32"
+ u >>= 32 // ERROR "u might be too small for shift of 32"
+ var p uintptr
+ _ = p << 31
+ _ = p << 32 // ERROR "p might be too small for shift of 32"
+ _ = p >> 32 // ERROR "p might be too small for shift of 32"
+ p <<= 32 // ERROR "p might be too small for shift of 32"
+ p >>= 32 // ERROR "p might be too small for shift of 32"
+}
diff --git a/llgo/third_party/go.tools/cmd/vet/testdata/structtag.go b/llgo/third_party/go.tools/cmd/vet/testdata/structtag.go
new file mode 100644
index 0000000000000000000000000000000000000000..55462e5a45766986e720b5ede5a996e8fefc756c
--- /dev/null
+++ b/llgo/third_party/go.tools/cmd/vet/testdata/structtag.go
@@ -0,0 +1,27 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains tests for the structtag checker.
+
+// This file contains the test for canonical struct tags.
+
+package testdata
+
+type StructTagTest struct {
+ X int "hello" // ERROR "not compatible with reflect.StructTag.Get"
+}
+
+type UnexportedEncodingTagTest struct {
+ x int `json:"xx"` // ERROR "struct field x has json tag but is not exported"
+ y int `xml:"yy"` // ERROR "struct field y has xml tag but is not exported"
+ z int
+ A int `json:"aa" xml:"bb"`
+}
+
+type unexp struct{}
+
+type JSONEmbeddedField struct {
+ UnexportedEncodingTagTest `is:"embedded"`
+ unexp `is:"embedded,notexported" json:"unexp"` // OK for now, see issue 7363
+}
diff --git a/llgo/third_party/go.tools/cmd/vet/testdata/unsafeptr.go b/llgo/third_party/go.tools/cmd/vet/testdata/unsafeptr.go
new file mode 100644
index 0000000000000000000000000000000000000000..8f64030b85689b75edbd9fd826cadb1d65308dd8
--- /dev/null
+++ b/llgo/third_party/go.tools/cmd/vet/testdata/unsafeptr.go
@@ -0,0 +1,61 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package testdata
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+func f() {
+ var x unsafe.Pointer
+ var y uintptr
+ x = unsafe.Pointer(y) // ERROR "possible misuse of unsafe.Pointer"
+ y = uintptr(x)
+
+ // only allowed pointer arithmetic is ptr +/- num.
+ // num+ptr is technically okay but still flagged: write ptr+num instead.
+ x = unsafe.Pointer(uintptr(x) + 1)
+ x = unsafe.Pointer(1 + uintptr(x)) // ERROR "possible misuse of unsafe.Pointer"
+ x = unsafe.Pointer(uintptr(x) + uintptr(x)) // ERROR "possible misuse of unsafe.Pointer"
+ x = unsafe.Pointer(uintptr(x) - 1)
+ x = unsafe.Pointer(1 - uintptr(x)) // ERROR "possible misuse of unsafe.Pointer"
+
+ // certain uses of reflect are okay
+ var v reflect.Value
+ x = unsafe.Pointer(v.Pointer())
+ x = unsafe.Pointer(v.UnsafeAddr())
+ var s1 *reflect.StringHeader
+ x = unsafe.Pointer(s1.Data)
+ var s2 *reflect.SliceHeader
+ x = unsafe.Pointer(s2.Data)
+ var s3 reflect.StringHeader
+ x = unsafe.Pointer(s3.Data) // ERROR "possible misuse of unsafe.Pointer"
+ var s4 reflect.SliceHeader
+ x = unsafe.Pointer(s4.Data) // ERROR "possible misuse of unsafe.Pointer"
+
+ // but only in reflect
+ var vv V
+ x = unsafe.Pointer(vv.Pointer()) // ERROR "possible misuse of unsafe.Pointer"
+ x = unsafe.Pointer(vv.UnsafeAddr()) // ERROR "possible misuse of unsafe.Pointer"
+ var ss1 *StringHeader
+ x = unsafe.Pointer(ss1.Data) // ERROR "possible misuse of unsafe.Pointer"
+ var ss2 *SliceHeader
+ x = unsafe.Pointer(ss2.Data) // ERROR "possible misuse of unsafe.Pointer"
+
+}
+
+type V interface {
+ Pointer() uintptr
+ UnsafeAddr() uintptr
+}
+
+type StringHeader struct {
+ Data uintptr
+}
+
+type SliceHeader struct {
+ Data uintptr
+}
diff --git a/llgo/third_party/go.tools/cmd/vet/types.go b/llgo/third_party/go.tools/cmd/vet/types.go
new file mode 100644
index 0000000000000000000000000000000000000000..6128199b62ff7a86df9561a5f9edeaa4d6701d65
--- /dev/null
+++ b/llgo/third_party/go.tools/cmd/vet/types.go
@@ -0,0 +1,362 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains the pieces of the tool that use typechecking from the go/types package.
+
+package main
+
+import (
+ "go/ast"
+ "go/token"
+
+ "llvm.org/llgo/third_party/go.tools/go/types"
+)
+
+// imports is the canonical map of imported packages we need for typechecking.
+// It is created during initialization.
+var imports = make(map[string]*types.Package)
+
+var (
+ stringerMethodType = types.New("func() string")
+ errorType = types.New("error").Underlying().(*types.Interface)
+ stringerType = types.New("interface{ String() string }").(*types.Interface)
+ formatterType *types.Interface
+)
+
+func init() {
+ typ := importType("fmt", "Formatter")
+ if typ != nil {
+ formatterType = typ.Underlying().(*types.Interface)
+ }
+}
+
+// importType returns the type denoted by the qualified identifier
+// path.name, and adds the respective package to the imports map
+// as a side effect.
+func importType(path, name string) types.Type {
+ pkg, err := types.DefaultImport(imports, path)
+ if err != nil {
+ warnf("import failed: %v", err)
+ return nil
+ }
+ if obj, ok := pkg.Scope().Lookup(name).(*types.TypeName); ok {
+ return obj.Type()
+ }
+ warnf("invalid type name %q", name)
+ return nil
+}
+
+func (pkg *Package) check(fs *token.FileSet, astFiles []*ast.File) error {
+ pkg.defs = make(map[*ast.Ident]types.Object)
+ pkg.uses = make(map[*ast.Ident]types.Object)
+ pkg.spans = make(map[types.Object]Span)
+ pkg.types = make(map[ast.Expr]types.TypeAndValue)
+ config := types.Config{
+ // We provide the same packages map for all imports to ensure
+ // that everybody sees identical packages for the given paths.
+ Packages: imports,
+ // By providing a Config with our own error function, it will continue
+ // past the first error. There is no need for that function to do anything.
+ Error: func(error) {},
+ }
+ info := &types.Info{
+ Types: pkg.types,
+ Defs: pkg.defs,
+ Uses: pkg.uses,
+ }
+ typesPkg, err := config.Check(pkg.path, fs, astFiles, info)
+ pkg.typesPkg = typesPkg
+ // update spans
+ for id, obj := range pkg.defs {
+ pkg.growSpan(id, obj)
+ }
+ for id, obj := range pkg.uses {
+ pkg.growSpan(id, obj)
+ }
+ return err
+}
+
+// isStruct reports whether the composite literal c is a struct.
+// If it is, it also returns a printable form of the type.
+func (pkg *Package) isStruct(c *ast.CompositeLit) (bool, string) {
+ // Determine whether the CompositeLit's type is a struct, if type information is available.
+ typ := pkg.types[c].Type
+ // If it's a named type, pull out the underlying type. If it's not, the Underlying
+ // method returns the type itself.
+ actual := typ
+ if actual != nil {
+ actual = actual.Underlying()
+ }
+ if actual == nil {
+ // No type information available. Assume true, so we do the check.
+ return true, ""
+ }
+ switch actual.(type) {
+ case *types.Struct:
+ return true, typ.String()
+ default:
+ return false, ""
+ }
+}
+
+// matchArgType reports an error if printf verb t is not appropriate
+// for operand arg.
+//
+// typ is used only for recursive calls; external callers must supply nil.
+//
+// (Recursion arises from the compound types {map,chan,slice} which
+// may be printed with %d etc. if that is appropriate for their element
+// types.)
+func (f *File) matchArgType(t printfArgType, typ types.Type, arg ast.Expr) bool {
+ return f.matchArgTypeInternal(t, typ, arg, make(map[types.Type]bool))
+}
+
+// matchArgTypeInternal is the internal version of matchArgType. It carries a map
+// remembering what types are in progress so we don't recur when faced with recursive
+// types or mutually recursive types.
+func (f *File) matchArgTypeInternal(t printfArgType, typ types.Type, arg ast.Expr, inProgress map[types.Type]bool) bool {
+ // %v, %T accept any argument type.
+ if t == anyType {
+ return true
+ }
+ if typ == nil {
+ // external call
+ typ = f.pkg.types[arg].Type
+ if typ == nil {
+ return true // probably a type check problem
+ }
+ }
+ // If the type implements fmt.Formatter, we have nothing to check.
+ // But (see issue 6259) that's not easy to verify, so instead we see
+ // if its method set contains a Format function. We could do better,
+ // even now, but we don't need to be 100% accurate. Wait for 6259 to
+ // be fixed instead. TODO.
+ if f.hasMethod(typ, "Format") {
+ return true
+ }
+ // If we can use a string, might arg (dynamically) implement the Stringer or Error interface?
+ if t&argString != 0 {
+ if types.AssertableTo(errorType, typ) || types.AssertableTo(stringerType, typ) {
+ return true
+ }
+ }
+
+ typ = typ.Underlying()
+ if inProgress[typ] {
+ // We're already looking at this type. The call that started it will take care of it.
+ return true
+ }
+ inProgress[typ] = true
+
+ switch typ := typ.(type) {
+ case *types.Signature:
+ return t&argPointer != 0
+
+ case *types.Map:
+ // Recur: map[int]int matches %d.
+ return t&argPointer != 0 ||
+ (f.matchArgTypeInternal(t, typ.Key(), arg, inProgress) && f.matchArgTypeInternal(t, typ.Elem(), arg, inProgress))
+
+ case *types.Chan:
+ return t&argPointer != 0
+
+ case *types.Array:
+ // Same as slice.
+ if types.Identical(typ.Elem().Underlying(), types.Typ[types.Byte]) && t&argString != 0 {
+ return true // %s matches []byte
+ }
+ // Recur: []int matches %d.
+ return t&argPointer != 0 || f.matchArgTypeInternal(t, typ.Elem().Underlying(), arg, inProgress)
+
+ case *types.Slice:
+ // Same as array.
+ if types.Identical(typ.Elem().Underlying(), types.Typ[types.Byte]) && t&argString != 0 {
+ return true // %s matches []byte
+ }
+ // Recur: []int matches %d. But watch out for
+ // type T []T
+ // If the element is a pointer type (type T []*T), it's handled fine by the Pointer case below.
+ return t&argPointer != 0 || f.matchArgTypeInternal(t, typ.Elem(), arg, inProgress)
+
+ case *types.Pointer:
+ // Ugly, but dealing with an edge case: a known pointer to an invalid type,
+ // probably something from a failed import.
+ if typ.Elem().String() == "invalid type" {
+ if *verbose {
+ f.Warnf(arg.Pos(), "printf argument %v is pointer to invalid or unknown type", f.gofmt(arg))
+ }
+ return true // special case
+ }
+ // If it's actually a pointer with %p, it prints as one.
+ if t == argPointer {
+ return true
+ }
+ // If it's pointer to struct, that's equivalent in our analysis to whether we can print the struct.
+ if str, ok := typ.Elem().Underlying().(*types.Struct); ok {
+ return f.matchStructArgType(t, str, arg, inProgress)
+ }
+ // The rest can print with %p as pointers, or as integers with %x etc.
+ return t&(argInt|argPointer) != 0
+
+ case *types.Struct:
+ return f.matchStructArgType(t, typ, arg, inProgress)
+
+ case *types.Interface:
+ // If the static type of the argument is empty interface, there's little we can do.
+ // Example:
+ // func f(x interface{}) { fmt.Printf("%s", x) }
+ // Whether x is valid for %s depends on the type of the argument to f. One day
+ // we will be able to do better. For now, we assume that empty interface is OK
+ // but non-empty interfaces, with Stringer and Error handled above, are errors.
+ return typ.NumMethods() == 0
+
+ case *types.Basic:
+ switch typ.Kind() {
+ case types.UntypedBool,
+ types.Bool:
+ return t&argBool != 0
+
+ case types.UntypedInt,
+ types.Int,
+ types.Int8,
+ types.Int16,
+ types.Int32,
+ types.Int64,
+ types.Uint,
+ types.Uint8,
+ types.Uint16,
+ types.Uint32,
+ types.Uint64,
+ types.Uintptr:
+ return t&argInt != 0
+
+ case types.UntypedFloat,
+ types.Float32,
+ types.Float64:
+ return t&argFloat != 0
+
+ case types.UntypedComplex,
+ types.Complex64,
+ types.Complex128:
+ return t&argComplex != 0
+
+ case types.UntypedString,
+ types.String:
+ return t&argString != 0
+
+ case types.UnsafePointer:
+ return t&(argPointer|argInt) != 0
+
+ case types.UntypedRune:
+ return t&(argInt|argRune) != 0
+
+ case types.UntypedNil:
+ return t&argPointer != 0 // TODO?
+
+ case types.Invalid:
+ if *verbose {
+ f.Warnf(arg.Pos(), "printf argument %v has invalid or unknown type", f.gofmt(arg))
+ }
+ return true // Probably a type check problem.
+ }
+ panic("unreachable")
+ }
+
+ return false
+}
+
+// hasBasicType reports whether x's type is a types.Basic with the given kind.
+func (f *File) hasBasicType(x ast.Expr, kind types.BasicKind) bool {
+ t := f.pkg.types[x].Type
+ if t != nil {
+ t = t.Underlying()
+ }
+ b, ok := t.(*types.Basic)
+ return ok && b.Kind() == kind
+}
+
+// matchStructArgType reports whether all the elements of the struct match the expected
+// type. For instance, with "%d" all the elements must be printable with the "%d" format.
+func (f *File) matchStructArgType(t printfArgType, typ *types.Struct, arg ast.Expr, inProgress map[types.Type]bool) bool {
+ for i := 0; i < typ.NumFields(); i++ {
+ if !f.matchArgTypeInternal(t, typ.Field(i).Type(), arg, inProgress) {
+ return false
+ }
+ }
+ return true
+}
+
+// numArgsInSignature tells how many formal arguments the function type
+// being called has.
+func (f *File) numArgsInSignature(call *ast.CallExpr) int {
+ // Check the type of the function or method declaration
+ typ := f.pkg.types[call.Fun].Type
+ if typ == nil {
+ return 0
+ }
+ // The type must be a signature, but be sure for safety.
+ sig, ok := typ.(*types.Signature)
+ if !ok {
+ return 0
+ }
+ return sig.Params().Len()
+}
+
+// isErrorMethodCall reports whether the call is of a method with signature
+// func Error() string
+// where "string" is the universe's string type. We know the method is called "Error".
+func (f *File) isErrorMethodCall(call *ast.CallExpr) bool {
+ typ := f.pkg.types[call].Type
+ if typ != nil {
+ // We know it's called "Error", so just check the function signature.
+ return types.Identical(f.pkg.types[call.Fun].Type, stringerMethodType)
+ }
+ // Without types, we can still check by hand.
+ // Is it a selector expression? Otherwise it's a function call, not a method call.
+ sel, ok := call.Fun.(*ast.SelectorExpr)
+ if !ok {
+ return false
+ }
+ // An Error method takes no arguments, so reject the call if any are present.
+ if len(call.Args) > 0 {
+ return false
+ }
+ // Check the type of the method declaration
+ typ = f.pkg.types[sel].Type
+ if typ == nil {
+ return false
+ }
+ // The type must be a signature, but be sure for safety.
+ sig, ok := typ.(*types.Signature)
+ if !ok {
+ return false
+ }
+ // There must be a receiver for it to be a method call. Otherwise it is
+ // a function, not something that satisfies the error interface.
+ if sig.Recv() == nil {
+ return false
+ }
+ // There must be no arguments. Already verified by type checking, but be thorough.
+ if sig.Params().Len() > 0 {
+ return false
+ }
+ // Finally the real questions.
+ // There must be one result.
+ if sig.Results().Len() != 1 {
+ return false
+ }
+ // It must have return type "string" from the universe.
+ return sig.Results().At(0).Type() == types.Typ[types.String]
+}
+
+// hasMethod reports whether the type contains a method with the given name.
+// It is part of the workaround for Formatters and should be deleted when
+// that workaround is no longer necessary.
+// TODO: This could be better once issue 6259 is fixed.
+func (f *File) hasMethod(typ types.Type, name string) bool {
+ // assume we have an addressable variable of type typ
+ obj, _, _ := types.LookupFieldOrMethod(typ, true, f.pkg.typesPkg, name)
+ _, ok := obj.(*types.Func)
+ return ok
+}
diff --git a/llgo/third_party/go.tools/cmd/vet/unsafeptr.go b/llgo/third_party/go.tools/cmd/vet/unsafeptr.go
new file mode 100644
index 0000000000000000000000000000000000000000..3e133fe0cfcaec43ed40691173a142847386f014
--- /dev/null
+++ b/llgo/third_party/go.tools/cmd/vet/unsafeptr.go
@@ -0,0 +1,98 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Check for invalid uintptr -> unsafe.Pointer conversions.
+
+package main
+
+import (
+ "go/ast"
+ "go/token"
+
+ "llvm.org/llgo/third_party/go.tools/go/types"
+)
+
+func init() {
+ register("unsafeptr",
+ "check for misuse of unsafe.Pointer",
+ checkUnsafePointer,
+ callExpr)
+}
+
+func checkUnsafePointer(f *File, node ast.Node) {
+ x := node.(*ast.CallExpr)
+ if len(x.Args) != 1 {
+ return
+ }
+ if f.hasBasicType(x.Fun, types.UnsafePointer) && f.hasBasicType(x.Args[0], types.Uintptr) && !f.isSafeUintptr(x.Args[0]) {
+ f.Badf(x.Pos(), "possible misuse of unsafe.Pointer")
+ }
+}
+
+// isSafeUintptr reports whether x - already known to be a uintptr -
+// is safe to convert to unsafe.Pointer. It is safe if x is itself derived
+// directly from an unsafe.Pointer via conversion and pointer arithmetic
+// or if x is the result of reflect.Value.Pointer or reflect.Value.UnsafeAddr
+// or obtained from the Data field of a *reflect.SliceHeader or *reflect.StringHeader.
+func (f *File) isSafeUintptr(x ast.Expr) bool {
+ switch x := x.(type) {
+ case *ast.ParenExpr:
+ return f.isSafeUintptr(x.X)
+
+ case *ast.SelectorExpr:
+ switch x.Sel.Name {
+ case "Data":
+ // reflect.SliceHeader and reflect.StringHeader are okay,
+ // but only if they are pointing at a real slice or string.
+ // It's not okay to do:
+ // var x SliceHeader
+ // x.Data = uintptr(unsafe.Pointer(...))
+ // ... use x ...
+ // p := unsafe.Pointer(x.Data)
+ // because in the middle the garbage collector doesn't
+ // see x.Data as a pointer and so x.Data may be dangling
+ // by the time we get to the conversion at the end.
+ // For now approximate by saying that *Header is okay
+ // but Header is not.
+ pt, ok := f.pkg.types[x.X].Type.(*types.Pointer)
+ if ok {
+ t, ok := pt.Elem().(*types.Named)
+ if ok && t.Obj().Pkg().Path() == "reflect" {
+ switch t.Obj().Name() {
+ case "StringHeader", "SliceHeader":
+ return true
+ }
+ }
+ }
+ }
+
+ case *ast.CallExpr:
+ switch len(x.Args) {
+ case 0:
+ // maybe call to reflect.Value.Pointer or reflect.Value.UnsafeAddr.
+ sel, ok := x.Fun.(*ast.SelectorExpr)
+ if !ok {
+ break
+ }
+ switch sel.Sel.Name {
+ case "Pointer", "UnsafeAddr":
+ t, ok := f.pkg.types[sel.X].Type.(*types.Named)
+ if ok && t.Obj().Pkg().Path() == "reflect" && t.Obj().Name() == "Value" {
+ return true
+ }
+ }
+
+ case 1:
+ // maybe a conversion of an unsafe.Pointer to uintptr
+ return f.hasBasicType(x.Fun, types.Uintptr) && f.hasBasicType(x.Args[0], types.UnsafePointer)
+ }
+
+ case *ast.BinaryExpr:
+ switch x.Op {
+ case token.ADD, token.SUB:
+ return f.isSafeUintptr(x.X) && !f.isSafeUintptr(x.Y)
+ }
+ }
+ return false
+}
diff --git a/llgo/third_party/go.tools/cmd/vet/vet_test.go b/llgo/third_party/go.tools/cmd/vet/vet_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..65fefb950f7f193812b3dd8059778470a8bd3cc6
--- /dev/null
+++ b/llgo/third_party/go.tools/cmd/vet/vet_test.go
@@ -0,0 +1,74 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main_test
+
+import (
+ "bytes"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "runtime"
+ "testing"
+)
+
+const (
+ dataDir = "testdata"
+ binary = "testvet"
+)
+
+// Run this shell script, but do it in Go so it can be run by "go test".
+// go build -o testvet
+// $(GOROOT)/test/errchk ./testvet -shadow -printfuncs='Warn:1,Warnf:1' testdata/*.go testdata/*.s
+// rm testvet
+//
+func TestVet(t *testing.T) {
+ // Plan 9 and Windows systems can't be guaranteed to have Perl and so can't run errchk.
+ switch runtime.GOOS {
+ case "plan9", "windows":
+ t.Skip("skipping test; no Perl on %q", runtime.GOOS)
+ }
+
+ // go build
+ cmd := exec.Command("go", "build", "-o", binary)
+ run(cmd, t)
+
+ // defer removal of vet
+ defer os.Remove(binary)
+
+ // errchk ./testvet
+ gos, err := filepath.Glob(filepath.Join(dataDir, "*.go"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ asms, err := filepath.Glob(filepath.Join(dataDir, "*.s"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ files := append(gos, asms...)
+ errchk := filepath.Join(runtime.GOROOT(), "test", "errchk")
+ flags := []string{
+ "./" + binary,
+ "-printfuncs=Warn:1,Warnf:1",
+ "-test", // TODO: Delete once -shadow is part of -all.
+ }
+ cmd = exec.Command(errchk, append(flags, files...)...)
+ if !run(cmd, t) {
+ t.Fatal("vet command failed")
+ }
+}
+
+func run(c *exec.Cmd, t *testing.T) bool {
+ output, err := c.CombinedOutput()
+ os.Stderr.Write(output)
+ if err != nil {
+ t.Fatal(err)
+ }
+ // Errchk delights by not returning non-zero status if it finds errors, so we look at the output.
+ // It prints "BUG" if there is a failure.
+ if !c.ProcessState.Success() {
+ return false
+ }
+ return !bytes.Contains(output, []byte("BUG"))
+}
diff --git a/llgo/third_party/go.tools/cmd/vet/whitelist/whitelist.go b/llgo/third_party/go.tools/cmd/vet/whitelist/whitelist.go
new file mode 100644
index 0000000000000000000000000000000000000000..975c9e378010e687224864fdd73a96ab1ade66c5
--- /dev/null
+++ b/llgo/third_party/go.tools/cmd/vet/whitelist/whitelist.go
@@ -0,0 +1,52 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package whitelist defines exceptions for the vet tool.
+package whitelist
+
+// UnkeyedLiteral lists types that are actually slices, but
+// syntactically we cannot tell whether the Typ in pkg.Typ{1, 2, 3}
+// is a slice or a struct, so we whitelist all of the standard
+// library's exported slice types.
+var UnkeyedLiteral = map[string]bool{
+ /*
+ find $GOROOT/src -type f | grep -v _test.go | xargs grep '^type.*\[\]' | \
+ grep -v ' map\[' | sed 's,/[^/]*go.type,,' | sed 's,.*src/,,' | \
+ sed 's, ,.,' | sed 's, .*,,' | grep -v '\.[a-z]' | \
+ sort | awk '{ print "\"" $0 "\": true," }'
+ */
+ "crypto/x509/pkix.RDNSequence": true,
+ "crypto/x509/pkix.RelativeDistinguishedNameSET": true,
+ "database/sql.RawBytes": true,
+ "debug/macho.LoadBytes": true,
+ "encoding/asn1.ObjectIdentifier": true,
+ "encoding/asn1.RawContent": true,
+ "encoding/json.RawMessage": true,
+ "encoding/xml.CharData": true,
+ "encoding/xml.Comment": true,
+ "encoding/xml.Directive": true,
+ "go/scanner.ErrorList": true,
+ "image/color.Palette": true,
+ "net.HardwareAddr": true,
+ "net.IP": true,
+ "net.IPMask": true,
+ "sort.Float64Slice": true,
+ "sort.IntSlice": true,
+ "sort.StringSlice": true,
+ "unicode.SpecialCase": true,
+
+ // These image and image/color struct types are frozen. We will never add fields to them.
+ "image/color.Alpha16": true,
+ "image/color.Alpha": true,
+ "image/color.Gray16": true,
+ "image/color.Gray": true,
+ "image/color.NRGBA64": true,
+ "image/color.NRGBA": true,
+ "image/color.RGBA64": true,
+ "image/color.RGBA": true,
+ "image/color.YCbCr": true,
+ "image.Point": true,
+ "image.Rectangle": true,
+ "image.Uniform": true,
+}
diff --git a/llgo/third_party/go.tools/codereview.cfg b/llgo/third_party/go.tools/codereview.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..43dbf3ce3b817d7998070c0fe33078c4479a63d4
--- /dev/null
+++ b/llgo/third_party/go.tools/codereview.cfg
@@ -0,0 +1,2 @@
+defaultcc: golang-codereviews@googlegroups.com
+contributors: http://go.googlecode.com/hg/CONTRIBUTORS
diff --git a/llgo/third_party/go.tools/container/intsets/sparse.go b/llgo/third_party/go.tools/container/intsets/sparse.go
new file mode 100644
index 0000000000000000000000000000000000000000..0ba7cb23461b8e603b58a5e750cc8a195054f17a
--- /dev/null
+++ b/llgo/third_party/go.tools/container/intsets/sparse.go
@@ -0,0 +1,806 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package intsets provides Sparse, a compact and fast representation
+// for sparse sets of int values.
+//
+// The time complexity of the operations Len, Insert, Remove and Has
+// is in O(n) but in practice those methods are faster and more
+// space-efficient than equivalent operations on sets based on the Go
+// map type. The IsEmpty, Min, Max, Clear and TakeMin operations
+// require constant time.
+//
+package intsets
+
+// TODO(adonovan):
+// - Add SymmetricDifference(x, y *Sparse), i.e. x ∆ y.
+// - Add SubsetOf (x∖y=∅) and Intersects (x∩y≠∅) predicates.
+// - Add InsertAll(...int), RemoveAll(...int)
+// - Add 'bool changed' results for {Intersection,Difference}With too.
+//
+// TODO(adonovan): implement Dense, a dense bit vector with a similar API.
+// The space usage would be proportional to Max(), not Len(), and the
+// implementation would be based upon big.Int.
+//
+// TODO(adonovan): experiment with making the root block indirect (nil
+// iff IsEmpty). This would reduce the memory usage when empty and
+// might simplify the aliasing invariants.
+//
+// TODO(adonovan): opt: make UnionWith and Difference faster.
+// These are the hot-spots for go/pointer.
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// A Sparse is a set of int values.
+// Sparse operations (even queries) are not concurrency-safe.
+//
+// The zero value for Sparse is a valid empty set.
+//
+// Sparse sets must be copied using the Copy method, not by assigning
+// a Sparse value.
+//
+type Sparse struct {
+ // An uninitialized Sparse represents an empty set.
+ // An empty set may also be represented by
+ // root.next == root.prev == &root.
+ // In a non-empty set, root.next points to the first block and
+ // root.prev to the last.
+ // root.offset and root.bits are unused.
+ root block
+}
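+
+// A brief usage sketch (illustrative only; it relies on nothing beyond the
+// exported methods defined in this file):
+//
+// var x, y Sparse
+// x.Insert(3)
+// x.Insert(435)
+// y.Insert(435)
+// x.UnionWith(&y) // x is now {3 435}
+// _ = x.Has(3) // true
+// var n int
+// for x.TakeMin(&n) { // drains x in increasing order: 3, then 435
+// println(n)
+// }
+//
+// Sets are passed and stored as *Sparse; assigning a Sparse value makes a
+// corrupt shallow copy, which is why the Copy method exists.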
+
+type word uintptr
+
+const (
+ _m = ^word(0)
+ bitsPerWord = 8 << (_m>>8&1 + _m>>16&1 + _m>>32&1)
+ bitsPerBlock = 256 // optimal value for go/pointer solver performance
+ wordsPerBlock = bitsPerBlock / bitsPerWord
+)
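+
+// A worked reading of the bitsPerWord expression above (explanatory only):
+// _m is the all-ones word, so _m>>8&1 and _m>>16&1 are 1 on both 32- and
+// 64-bit platforms, while _m>>32&1 is 1 only when word is wider than 32
+// bits. The shift count is therefore 3 on 64-bit (8 << 3 == 64) and 2 on
+// 32-bit (8 << 2 == 32).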
+
+// Limit values of implementation-specific int type.
+const (
+ MaxInt = int(^uint(0) >> 1)
+ MinInt = -MaxInt - 1
+)
+
+// -- block ------------------------------------------------------------
+
+// A set is represented as a circular doubly-linked list of blocks,
+// each containing an offset and a bit array of fixed size
+// bitsPerBlock; the blocks are ordered by increasing offset.
+//
+// The set contains an element x iff the block whose offset is x - (x
+// mod bitsPerBlock) has the bit (x mod bitsPerBlock) set, where mod
+// is the Euclidean remainder.
+//
+// A block may only be empty transiently.
+//
+type block struct {
+ offset int // offset mod bitsPerBlock == 0
+ bits [wordsPerBlock]word // contains at least one set bit
+ next, prev *block // doubly-linked list of blocks
+}
+
+// wordMask returns the word index (in block.bits)
+// and single-bit mask for the block's ith bit.
+func wordMask(i uint) (w uint, mask word) {
+ w = i / bitsPerWord
+ mask = 1 << (i % bitsPerWord)
+ return
+}
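+
+// For example, on a 64-bit platform wordMask(70) returns w = 1 and
+// mask = 1<<6: bit 70 of a block is bit 6 of the block's second word.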
+
+// insert sets the block b's ith bit and
+// returns true if it was not already set.
+//
+func (b *block) insert(i uint) bool {
+ w, mask := wordMask(i)
+ if b.bits[w]&mask == 0 {
+ b.bits[w] |= mask
+ return true
+ }
+ return false
+}
+
+// remove clears the block's ith bit and
+// returns true if the bit was previously set.
+// NB: may leave the block empty.
+//
+func (b *block) remove(i uint) bool {
+ w, mask := wordMask(i)
+ if b.bits[w]&mask != 0 {
+ b.bits[w] &^= mask
+ return true
+ }
+ return false
+}
+
+// has reports whether the block's ith bit is set.
+func (b *block) has(i uint) bool {
+ w, mask := wordMask(i)
+ return b.bits[w]&mask != 0
+}
+
+// empty reports whether b.len()==0, but more efficiently.
+func (b *block) empty() bool {
+ for _, w := range b.bits {
+ if w != 0 {
+ return false
+ }
+ }
+ return true
+}
+
+// len returns the number of set bits in block b.
+func (b *block) len() int {
+ var l int
+ for _, w := range b.bits {
+ l += popcount(w)
+ }
+ return l
+}
+
+// max returns the maximum element of the block.
+// The block must not be empty.
+//
+func (b *block) max() int {
+ bi := b.offset + bitsPerBlock
+ // Decrement bi by the number of high zero bits in b.bits.
+ for i := len(b.bits) - 1; i >= 0; i-- {
+ if w := b.bits[i]; w != 0 {
+ return bi - nlz(w) - 1
+ }
+ bi -= bitsPerWord
+ }
+ panic("BUG: empty block")
+}
+
+// min returns the minimum element of the block,
+// and also removes it if take is set.
+// The block must not be initially empty.
+// NB: may leave the block empty.
+//
+func (b *block) min(take bool) int {
+ for i, w := range b.bits {
+ if w != 0 {
+ tz := ntz(w)
+ if take {
+ b.bits[i] = w &^ (1 << uint(tz))
+ }
+ return b.offset + int(i*bitsPerWord) + tz
+ }
+ }
+ panic("BUG: empty block")
+}
+
+// forEach calls f for each element of block b.
+// f must not mutate b's enclosing Sparse.
+func (b *block) forEach(f func(int)) {
+ for i, w := range b.bits {
+ offset := b.offset + i*bitsPerWord
+ for bi := 0; w != 0 && bi < bitsPerWord; bi++ {
+ if w&1 != 0 {
+ f(offset)
+ }
+ offset++
+ w >>= 1
+ }
+ }
+}
+
+// offsetAndBitIndex returns the offset of the block that would
+// contain x and the bit index of x within that block.
+//
+func offsetAndBitIndex(x int) (int, uint) {
+ mod := x % bitsPerBlock
+ if mod < 0 {
+ // Euclidean (non-negative) remainder
+ mod += bitsPerBlock
+ }
+ return x - mod, uint(mod)
+}
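+
+// For example, offsetAndBitIndex(300) is (256, 44): element 300 lives in
+// the block at offset 256 as bit 44. For negative elements the Euclidean
+// remainder keeps the bit index non-negative: offsetAndBitIndex(-1) is
+// (-256, 255), i.e. the highest bit of the block at offset -256.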
+
+// -- Sparse --------------------------------------------------------------
+
+// start returns the root's next block, which is the root block
+// (if s.IsEmpty()) or the first true block otherwise.
+// start has the side effect of ensuring that s is properly
+// initialized.
+//
+func (s *Sparse) start() *block {
+ root := &s.root
+ if root.next == nil {
+ root.next = root
+ root.prev = root
+ } else if root.next.prev != root {
+ // Copying a Sparse x leads to pernicious corruption: the
+ // new Sparse y shares the old linked list, but iteration
+ // on y will never encounter &y.root so it goes into a
+ // loop. Fail fast before this occurs.
+ panic("A Sparse has been copied without (*Sparse).Copy()")
+ }
+
+ return root.next
+}
+
+// IsEmpty reports whether the set s is empty.
+func (s *Sparse) IsEmpty() bool {
+ return s.start() == &s.root
+}
+
+// Len returns the number of elements in the set s.
+func (s *Sparse) Len() int {
+ var l int
+ for b := s.start(); b != &s.root; b = b.next {
+ l += b.len()
+ }
+ return l
+}
+
+// Max returns the maximum element of the set s, or MinInt if s is empty.
+func (s *Sparse) Max() int {
+ if s.IsEmpty() {
+ return MinInt
+ }
+ return s.root.prev.max()
+}
+
+// Min returns the minimum element of the set s, or MaxInt if s is empty.
+func (s *Sparse) Min() int {
+ if s.IsEmpty() {
+ return MaxInt
+ }
+ return s.root.next.min(false)
+}
+
+// block returns the block that would contain offset,
+// or nil if s contains no such block.
+//
+func (s *Sparse) block(offset int) *block {
+ b := s.start()
+ for b != &s.root && b.offset <= offset {
+ if b.offset == offset {
+ return b
+ }
+ b = b.next
+ }
+ return nil
+}
+
+// Insert adds x to the set s, and reports whether the set grew.
+func (s *Sparse) Insert(x int) bool {
+ offset, i := offsetAndBitIndex(x)
+ b := s.start()
+ for b != &s.root && b.offset <= offset {
+ if b.offset == offset {
+ return b.insert(i)
+ }
+ b = b.next
+ }
+
+ // Insert new block before b.
+ new := &block{offset: offset}
+ new.next = b
+ new.prev = b.prev
+ new.prev.next = new
+ new.next.prev = new
+ return new.insert(i)
+}
+
+func (s *Sparse) removeBlock(b *block) {
+ b.prev.next = b.next
+ b.next.prev = b.prev
+}
+
+// Remove removes x from the set s, and reports whether the set shrank.
+func (s *Sparse) Remove(x int) bool {
+ offset, i := offsetAndBitIndex(x)
+ if b := s.block(offset); b != nil {
+ if !b.remove(i) {
+ return false
+ }
+ if b.empty() {
+ s.removeBlock(b)
+ }
+ return true
+ }
+ return false
+}
+
+// Clear removes all elements from the set s.
+func (s *Sparse) Clear() {
+ s.root.next = &s.root
+ s.root.prev = &s.root
+}
+
+// If set s is non-empty, TakeMin sets *p to the minimum element of
+// the set s, removes that element from the set and returns true.
+// Otherwise, it returns false and *p is undefined.
+//
+// This method may be used for iteration over a worklist like so:
+//
+// var x int
+// for worklist.TakeMin(&x) { use(x) }
+//
+func (s *Sparse) TakeMin(p *int) bool {
+ head := s.start()
+ if head == &s.root {
+ return false
+ }
+ *p = head.min(true)
+ if head.empty() {
+ s.removeBlock(head)
+ }
+ return true
+}
+
+// Has reports whether x is an element of the set s.
+func (s *Sparse) Has(x int) bool {
+ offset, i := offsetAndBitIndex(x)
+ if b := s.block(offset); b != nil {
+ return b.has(i)
+ }
+ return false
+}
+
+// forEach applies function f to each element of the set s in order.
+//
+// f must not mutate s. Consequently, forEach is not safe to expose
+// to clients. In any case, using "range s.AppendTo()" allows more
+// natural control flow with continue/break/return.
+//
+func (s *Sparse) forEach(f func(int)) {
+ for b := s.start(); b != &s.root; b = b.next {
+ b.forEach(f)
+ }
+}
+
+// Copy sets s to the value of x.
+func (s *Sparse) Copy(x *Sparse) {
+ if s == x {
+ return
+ }
+
+ xb := x.start()
+ sb := s.start()
+ for xb != &x.root {
+ if sb == &s.root {
+ sb = s.insertBlockBefore(sb)
+ }
+ sb.offset = xb.offset
+ sb.bits = xb.bits
+ xb = xb.next
+ sb = sb.next
+ }
+ s.discardTail(sb)
+}
+
+// insertBlockBefore returns a new block, inserting it before next.
+func (s *Sparse) insertBlockBefore(next *block) *block {
+ b := new(block)
+ b.next = next
+ b.prev = next.prev
+ b.prev.next = b
+ next.prev = b
+ return b
+}
+
+// discardTail removes block b and all its successors from s.
+func (s *Sparse) discardTail(b *block) {
+ if b != &s.root {
+ b.prev.next = &s.root
+ s.root.prev = b.prev
+ }
+}
+
+// IntersectionWith sets s to the intersection s ∩ x.
+func (s *Sparse) IntersectionWith(x *Sparse) {
+ if s == x {
+ return
+ }
+
+ xb := x.start()
+ sb := s.start()
+ for xb != &x.root && sb != &s.root {
+ switch {
+ case xb.offset < sb.offset:
+ xb = xb.next
+
+ case xb.offset > sb.offset:
+ sb = sb.next
+ s.removeBlock(sb.prev)
+
+ default:
+ var sum word
+ for i := range sb.bits {
+ r := xb.bits[i] & sb.bits[i]
+ sb.bits[i] = r
+ sum |= r
+ }
+ if sum != 0 {
+ sb = sb.next
+ } else {
+ // sb will be overwritten or removed
+ }
+
+ xb = xb.next
+ }
+ }
+
+ s.discardTail(sb)
+}
+
+// Intersection sets s to the intersection x ∩ y.
+func (s *Sparse) Intersection(x, y *Sparse) {
+ switch {
+ case s == x:
+ s.IntersectionWith(y)
+ return
+ case s == y:
+ s.IntersectionWith(x)
+ return
+ case x == y:
+ s.Copy(x)
+ return
+ }
+
+ xb := x.start()
+ yb := y.start()
+ sb := s.start()
+ for xb != &x.root && yb != &y.root {
+ switch {
+ case xb.offset < yb.offset:
+ xb = xb.next
+ continue
+ case xb.offset > yb.offset:
+ yb = yb.next
+ continue
+ }
+
+ if sb == &s.root {
+ sb = s.insertBlockBefore(sb)
+ }
+ sb.offset = xb.offset
+
+ var sum word
+ for i := range sb.bits {
+ r := xb.bits[i] & yb.bits[i]
+ sb.bits[i] = r
+ sum |= r
+ }
+ if sum != 0 {
+ sb = sb.next
+ } else {
+ // sb will be overwritten or removed
+ }
+
+ xb = xb.next
+ yb = yb.next
+ }
+
+ s.discardTail(sb)
+}
+
+// UnionWith sets s to the union s ∪ x, and reports whether s grew.
+func (s *Sparse) UnionWith(x *Sparse) bool {
+ if s == x {
+ return false
+ }
+
+ var changed bool
+ xb := x.start()
+ sb := s.start()
+ for xb != &x.root {
+ if sb != &s.root && sb.offset == xb.offset {
+ for i := range xb.bits {
+ if sb.bits[i] != xb.bits[i] {
+ sb.bits[i] |= xb.bits[i]
+ changed = true
+ }
+ }
+ xb = xb.next
+ } else if sb == &s.root || sb.offset > xb.offset {
+ sb = s.insertBlockBefore(sb)
+ sb.offset = xb.offset
+ sb.bits = xb.bits
+ changed = true
+
+ xb = xb.next
+ }
+ sb = sb.next
+ }
+ return changed
+}
+
+// Union sets s to the union x ∪ y.
+func (s *Sparse) Union(x, y *Sparse) {
+ switch {
+ case x == y:
+ s.Copy(x)
+ return
+ case s == x:
+ s.UnionWith(y)
+ return
+ case s == y:
+ s.UnionWith(x)
+ return
+ }
+
+ xb := x.start()
+ yb := y.start()
+ sb := s.start()
+ for xb != &x.root || yb != &y.root {
+ if sb == &s.root {
+ sb = s.insertBlockBefore(sb)
+ }
+ switch {
+ case yb == &y.root || (xb != &x.root && xb.offset < yb.offset):
+ sb.offset = xb.offset
+ sb.bits = xb.bits
+ xb = xb.next
+
+ case xb == &x.root || (yb != &y.root && yb.offset < xb.offset):
+ sb.offset = yb.offset
+ sb.bits = yb.bits
+ yb = yb.next
+
+ default:
+ sb.offset = xb.offset
+ for i := range xb.bits {
+ sb.bits[i] = xb.bits[i] | yb.bits[i]
+ }
+ xb = xb.next
+ yb = yb.next
+ }
+ sb = sb.next
+ }
+
+ s.discardTail(sb)
+}
+
+// DifferenceWith sets s to the difference s ∖ x.
+func (s *Sparse) DifferenceWith(x *Sparse) {
+ if s == x {
+ s.Clear()
+ return
+ }
+
+ xb := x.start()
+ sb := s.start()
+ for xb != &x.root && sb != &s.root {
+ switch {
+ case xb.offset > sb.offset:
+ sb = sb.next
+
+ case xb.offset < sb.offset:
+ xb = xb.next
+
+ default:
+ var sum word
+ for i := range sb.bits {
+ r := sb.bits[i] & ^xb.bits[i]
+ sb.bits[i] = r
+ sum |= r
+ }
+ sb = sb.next
+ xb = xb.next
+
+ if sum == 0 {
+ s.removeBlock(sb.prev)
+ }
+ }
+ }
+}
+
+// Difference sets s to the difference x ∖ y.
+func (s *Sparse) Difference(x, y *Sparse) {
+ switch {
+ case x == y:
+ s.Clear()
+ return
+ case s == x:
+ s.DifferenceWith(y)
+ return
+ case s == y:
+ var y2 Sparse
+ y2.Copy(y)
+ s.Difference(x, &y2)
+ return
+ }
+
+ xb := x.start()
+ yb := y.start()
+ sb := s.start()
+ for xb != &x.root && yb != &y.root {
+ if xb.offset > yb.offset {
+ // y has block, x has none
+ yb = yb.next
+ continue
+ }
+
+ if sb == &s.root {
+ sb = s.insertBlockBefore(sb)
+ }
+ sb.offset = xb.offset
+
+ switch {
+ case xb.offset < yb.offset:
+ // x has block, y has none
+ sb.bits = xb.bits
+
+ sb = sb.next
+
+ default:
+ // x and y have corresponding blocks
+ var sum word
+ for i := range sb.bits {
+ r := xb.bits[i] & ^yb.bits[i]
+ sb.bits[i] = r
+ sum |= r
+ }
+ if sum != 0 {
+ sb = sb.next
+ } else {
+ // sb will be overwritten or removed
+ }
+
+ yb = yb.next
+ }
+ xb = xb.next
+ }
+
+ for xb != &x.root {
+ if sb == &s.root {
+ sb = s.insertBlockBefore(sb)
+ }
+ sb.offset = xb.offset
+ sb.bits = xb.bits
+ sb = sb.next
+
+ xb = xb.next
+ }
+
+ s.discardTail(sb)
+}
+
+// Equals reports whether the sets s and t have the same elements.
+func (s *Sparse) Equals(t *Sparse) bool {
+ if s == t {
+ return true
+ }
+ sb := s.start()
+ tb := t.start()
+ for {
+ switch {
+ case sb == &s.root && tb == &t.root:
+ return true
+ case sb == &s.root || tb == &t.root:
+ return false
+ case sb.offset != tb.offset:
+ return false
+ case sb.bits != tb.bits:
+ return false
+ }
+
+ sb = sb.next
+ tb = tb.next
+ }
+}
+
+// String returns a human-readable description of the set s.
+func (s *Sparse) String() string {
+ var buf bytes.Buffer
+ buf.WriteByte('{')
+ s.forEach(func(x int) {
+ if buf.Len() > 1 {
+ buf.WriteByte(' ')
+ }
+ fmt.Fprintf(&buf, "%d", x)
+ })
+ buf.WriteByte('}')
+ return buf.String()
+}
+
+// BitString returns the set as a string of 1s and 0s denoting the sum
+// of the i'th powers of 2, for each i in s. A radix point, always
+// preceded by a digit, appears if the sum is non-integral.
+//
+// Examples:
+// {}.BitString() = "0"
+// {4,5}.BitString() = "110000"
+// {-3}.BitString() = "0.001"
+// {-3,0,4,5}.BitString() = "110001.001"
+//
+func (s *Sparse) BitString() string {
+ if s.IsEmpty() {
+ return "0"
+ }
+
+ min, max := s.Min(), s.Max()
+ var nbytes int
+ if max > 0 {
+ nbytes = max
+ }
+ nbytes++ // zero bit
+ radix := nbytes
+ if min < 0 {
+ nbytes += len(".") - min
+ }
+
+ b := make([]byte, nbytes)
+ for i := range b {
+ b[i] = '0'
+ }
+ if radix < nbytes {
+ b[radix] = '.'
+ }
+ s.forEach(func(x int) {
+ if x >= 0 {
+ x += len(".")
+ }
+ b[radix-x] = '1'
+ })
+ return string(b)
+}
+
+// GoString returns a string showing the internal representation of
+// the set s.
+//
+func (s *Sparse) GoString() string {
+ var buf bytes.Buffer
+ for b := s.start(); b != &s.root; b = b.next {
+ fmt.Fprintf(&buf, "block %p {offset=%d next=%p prev=%p",
+ b, b.offset, b.next, b.prev)
+ for _, w := range b.bits {
+ fmt.Fprintf(&buf, " 0%016x", w)
+ }
+ fmt.Fprintf(&buf, "}\n")
+ }
+ return buf.String()
+}
+
+// AppendTo returns the result of appending the elements of s to slice
+// in order.
+func (s *Sparse) AppendTo(slice []int) []int {
+ s.forEach(func(x int) {
+ slice = append(slice, x)
+ })
+ return slice
+}
+
+// -- Testing/debugging ------------------------------------------------
+
+// check returns an error if the representation invariants of s are violated.
+func (s *Sparse) check() error {
+ if !s.root.empty() {
+ return fmt.Errorf("non-empty root block")
+ }
+ if s.root.offset != 0 {
+ return fmt.Errorf("root block has non-zero offset %d", s.root.offset)
+ }
+ for b := s.start(); b != &s.root; b = b.next {
+ if b.offset%bitsPerBlock != 0 {
+ return fmt.Errorf("bad offset modulo: %d", b.offset)
+ }
+ if b.empty() {
+ return fmt.Errorf("empty block")
+ }
+ if b.prev.next != b {
+ return fmt.Errorf("bad prev.next link")
+ }
+ if b.next.prev != b {
+ return fmt.Errorf("bad next.prev link")
+ }
+ if b.prev != &s.root {
+ if b.offset <= b.prev.offset {
+ return fmt.Errorf("bad offset order: b.offset=%d, prev.offset=%d",
+ b.offset, b.prev.offset)
+ }
+ }
+ }
+ return nil
+}
diff --git a/llgo/third_party/go.tools/container/intsets/sparse_test.go b/llgo/third_party/go.tools/container/intsets/sparse_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..da915ff9922b9922a7282f7b289186ab02a4b624
--- /dev/null
+++ b/llgo/third_party/go.tools/container/intsets/sparse_test.go
@@ -0,0 +1,521 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package intsets_test
+
+import (
+ "fmt"
+ "log"
+ "math/rand"
+ "sort"
+ "strings"
+ "testing"
+
+ "llvm.org/llgo/third_party/go.tools/container/intsets"
+)
+
+func TestBasics(t *testing.T) {
+ var s intsets.Sparse
+ if len := s.Len(); len != 0 {
+ t.Errorf("Len({}): got %d, want 0", len)
+ }
+ if s := s.String(); s != "{}" {
+ t.Errorf("String({}): got %q, want \"{}\"", s)
+ }
+ if s.Has(3) {
+ t.Errorf("Has(3): got true, want false")
+ }
+ if err := s.Check(); err != nil {
+ t.Error(err)
+ }
+
+ if !s.Insert(3) {
+ t.Errorf("Insert(3): got false, want true")
+ }
+ if max := s.Max(); max != 3 {
+ t.Errorf("Max: got %d, want 3", max)
+ }
+
+ if !s.Insert(435) {
+ t.Errorf("Insert(435): got false, want true")
+ }
+ if s := s.String(); s != "{3 435}" {
+ t.Errorf("String({3 435}): got %q, want \"{3 435}\"", s)
+ }
+ if max := s.Max(); max != 435 {
+ t.Errorf("Max: got %d, want 435", max)
+ }
+ if len := s.Len(); len != 2 {
+ t.Errorf("Len: got %d, want 2", len)
+ }
+
+ if !s.Remove(435) {
+ t.Errorf("Remove(435): got false, want true")
+ }
+ if s := s.String(); s != "{3}" {
+ t.Errorf("String({3}): got %q, want \"{3}\"", s)
+ }
+}
+
+// Insert, Len, IsEmpty, Has, Clear, AppendTo.
+func TestMoreBasics(t *testing.T) {
+ var set intsets.Sparse
+ set.Insert(456)
+ set.Insert(123)
+ set.Insert(789)
+ if set.Len() != 3 {
+ t.Errorf("%s.Len: got %d, want 3", set, set.Len())
+ }
+ if set.IsEmpty() {
+ t.Errorf("%s.IsEmpty: got true", set)
+ }
+ if !set.Has(123) {
+ t.Errorf("%s.Has(123): got false", set)
+ }
+ if set.Has(1234) {
+ t.Errorf("%s.Has(1234): got true", set)
+ }
+ got := set.AppendTo([]int{-1})
+ if want := []int{-1, 123, 456, 789}; fmt.Sprint(got) != fmt.Sprint(want) {
+ t.Errorf("%s.AppendTo: got %v, want %v", got, want)
+ }
+
+ set.Clear()
+
+ if set.Len() != 0 {
+ t.Errorf("Clear: got %d, want 0", set.Len())
+ }
+ if !set.IsEmpty() {
+ t.Errorf("IsEmpty: got false")
+ }
+ if set.Has(123) {
+ t.Errorf("%s.Has: got false", set)
+ }
+}
+
+func TestTakeMin(t *testing.T) {
+ var set intsets.Sparse
+ set.Insert(456)
+ set.Insert(123)
+ set.Insert(789)
+ set.Insert(-123)
+ var got int
+ for i, want := range []int{-123, 123, 456, 789} {
+ if !set.TakeMin(&got) || got != want {
+ t.Errorf("TakeMin #%d: got %d, want %d", i, got, want)
+ }
+ }
+ if set.TakeMin(&got) {
+ t.Errorf("%s.TakeMin returned true", set, got)
+ }
+ if err := set.Check(); err != nil {
+ t.Fatalf("check: %s: %#v", err, &set)
+ }
+}
+
+func TestMinAndMax(t *testing.T) {
+ values := []int{0, 456, 123, 789, -123} // elt 0 => empty set
+ wantMax := []int{intsets.MinInt, 456, 456, 789, 789}
+ wantMin := []int{intsets.MaxInt, 456, 123, 123, -123}
+
+ var set intsets.Sparse
+ for i, x := range values {
+ if i != 0 {
+ set.Insert(x)
+ }
+ if got, want := set.Min(), wantMin[i]; got != want {
+ t.Errorf("Min #%d: got %d, want %d", i, got, want)
+ }
+ if got, want := set.Max(), wantMax[i]; got != want {
+ t.Errorf("Max #%d: got %d, want %d", i, got, want)
+ }
+ }
+
+ set.Insert(intsets.MinInt)
+ if got, want := set.Min(), intsets.MinInt; got != want {
+ t.Errorf("Min: got %d, want %d", got, want)
+ }
+
+ set.Insert(intsets.MaxInt)
+ if got, want := set.Max(), intsets.MaxInt; got != want {
+ t.Errorf("Max: got %d, want %d", got, want)
+ }
+}
+
+func TestEquals(t *testing.T) {
+ var setX intsets.Sparse
+ setX.Insert(456)
+ setX.Insert(123)
+ setX.Insert(789)
+
+ if !setX.Equals(&setX) {
+ t.Errorf("Equals(%s, %s): got false", &setX, &setX)
+ }
+
+ var setY intsets.Sparse
+ setY.Insert(789)
+ setY.Insert(456)
+ setY.Insert(123)
+
+ if !setX.Equals(&setY) {
+ t.Errorf("Equals(%s, %s): got false", &setX, &setY)
+ }
+
+ setY.Insert(1)
+ if setX.Equals(&setY) {
+ t.Errorf("Equals(%s, %s): got true", &setX, &setY)
+ }
+
+ var empty intsets.Sparse
+ if setX.Equals(&empty) {
+ t.Errorf("Equals(%s, %s): got true", &setX, &empty)
+ }
+
+ // Edge case: some block (with offset=0) appears in X but not Y.
+ setY.Remove(123)
+ if setX.Equals(&setY) {
+ t.Errorf("Equals(%s, %s): got true", &setX, &setY)
+ }
+}
+
+// A pset is a parallel implementation of a set using both an intsets.Sparse
+// and a built-in hash map.
+type pset struct {
+ hash map[int]bool
+ bits intsets.Sparse
+}
+
+func makePset() *pset {
+ return &pset{hash: make(map[int]bool)}
+}
+
+func (set *pset) add(n int) {
+ prev := len(set.hash)
+ set.hash[n] = true
+ grewA := len(set.hash) > prev
+
+ grewB := set.bits.Insert(n)
+
+ if grewA != grewB {
+ panic(fmt.Sprintf("add(%d): grewA=%t grewB=%t", n, grewA, grewB))
+ }
+}
+
+func (set *pset) remove(n int) {
+ prev := len(set.hash)
+ delete(set.hash, n)
+ shrankA := len(set.hash) < prev
+
+ shrankB := set.bits.Remove(n)
+
+ if shrankA != shrankB {
+ panic(fmt.Sprintf("remove(%d): shrankA=%t shrankB=%t", n, shrankA, shrankB))
+ }
+}
+
+func (set *pset) check(t *testing.T, msg string) {
+ var eltsA []int
+ for elt := range set.hash {
+ eltsA = append(eltsA, int(elt))
+ }
+ sort.Ints(eltsA)
+
+ eltsB := set.bits.AppendTo(nil)
+
+ if a, b := fmt.Sprint(eltsA), fmt.Sprint(eltsB); a != b {
+ t.Errorf("check(%s): hash=%s bits=%s (%s)", msg, a, b, &set.bits)
+ }
+
+ if err := set.bits.Check(); err != nil {
+ t.Fatalf("Check(%s): %s: %#v", msg, err, &set.bits)
+ }
+}
+
+// randomPset returns a parallel set of random size and elements.
+func randomPset(prng *rand.Rand, maxSize int) *pset {
+ set := makePset()
+ size := int(prng.Int()) % maxSize
+ for i := 0; i < size; i++ {
+ // TODO(adonovan): benchmark how performance varies
+ // with this sparsity parameter.
+ n := int(prng.Int()) % 10000
+ set.add(n)
+ }
+ return set
+}
+
+// TestRandomMutations performs the same random adds/removes on two
+// set implementations and ensures that they compute the same result.
+func TestRandomMutations(t *testing.T) {
+ const debug = false
+
+ set := makePset()
+ prng := rand.New(rand.NewSource(0))
+ for i := 0; i < 10000; i++ {
+ n := int(prng.Int())%2000 - 1000
+ if i%2 == 0 {
+ if debug {
+ log.Printf("add %d", n)
+ }
+ set.add(n)
+ } else {
+ if debug {
+ log.Printf("remove %d", n)
+ }
+ set.remove(n)
+ }
+ if debug {
+ set.check(t, "post mutation")
+ }
+ }
+ set.check(t, "final")
+ if debug {
+ log.Print(&set.bits)
+ }
+}
+
+// TestSetOperations exercises classic set operations: ∩ , ∪, \.
+func TestSetOperations(t *testing.T) {
+ prng := rand.New(rand.NewSource(0))
+
+ // Use random sets of sizes from 0 to about 1000.
+ // For each operator, we test variations such as
+ // Z.op(X, Y), Z.op(X, Z) and Z.op(Z, Y) to exercise
+ // the degenerate cases of each method implementation.
+ for i := uint(0); i < 12; i++ {
+ X := randomPset(prng, 1<<i)
+ Y := randomPset(prng, 1<<i)
+ // ...
+ _, _ = X, Y
+ }
+}
+
+// TestIntersectionWith exercises the >, <, == cases in IntersectionWith
+// that the TestSetOperations data is too dense to cover.
+func TestIntersectionWith(t *testing.T) {
+ var X, Y intsets.Sparse
+ X.Insert(1)
+ X.Insert(1000)
+ X.Insert(8000)
+ Y.Insert(1)
+ Y.Insert(2000)
+ Y.Insert(4000)
+ X.IntersectionWith(&Y)
+ if got, want := X.String(), "{1}"; got != want {
+ t.Errorf("IntersectionWith: got %s, want %s", got, want)
+ }
+}
+
+func TestBitString(t *testing.T) {
+ for _, test := range []struct {
+ input []int
+ want string
+ }{
+ {nil, "0"},
+ {[]int{0}, "1"},
+ {[]int{0, 4, 5}, "110001"},
+ {[]int{0, 7, 177}, "1" + strings.Repeat("0", 169) + "10000001"},
+ {[]int{-3, 0, 4, 5}, "110001.001"},
+ {[]int{-3}, "0.001"},
+ } {
+ var set intsets.Sparse
+ for _, x := range test.input {
+ set.Insert(x)
+ }
+ if got := set.BitString(); got != test.want {
+ t.Errorf("BitString(%s) = %s, want %s", set.String(), got, test.want)
+ }
+ }
+}
+
+func TestFailFastOnShallowCopy(t *testing.T) {
+ var x intsets.Sparse
+ x.Insert(1)
+
+ y := x // shallow copy (breaks representation invariants)
+ defer func() {
+ got := fmt.Sprint(recover())
+ want := "A Sparse has been copied without (*Sparse).Copy()"
+ if got != want {
+ t.Errorf("shallow copy: recover() = %q, want %q", got, want)
+ }
+ }()
+ y.String() // panics
+ t.Error("didn't panic as expected")
+}
+
+// -- Benchmarks -------------------------------------------------------
+
+// TODO(adonovan):
+// - Add benchmarks of each method.
+// - Gather set distributions from pointer analysis.
+// - Measure memory usage.
+
+func BenchmarkSparseBitVector(b *testing.B) {
+ prng := rand.New(rand.NewSource(0))
+ for tries := 0; tries < b.N; tries++ {
+ var x, y, z intsets.Sparse
+ for i := 0; i < 1000; i++ {
+ n := int(prng.Int()) % 100000
+ if i%2 == 0 {
+ x.Insert(n)
+ } else {
+ y.Insert(n)
+ }
+ }
+ z.Union(&x, &y)
+ z.Difference(&x, &y)
+ }
+}
+
+func BenchmarkHashTable(b *testing.B) {
+ prng := rand.New(rand.NewSource(0))
+ for tries := 0; tries < b.N; tries++ {
+ x, y, z := make(map[int]bool), make(map[int]bool), make(map[int]bool)
+ for i := 0; i < 1000; i++ {
+ n := int(prng.Int()) % 100000
+ if i%2 == 0 {
+ x[n] = true
+ } else {
+ y[n] = true
+ }
+ }
+ // union
+ for n := range x {
+ z[n] = true
+ }
+ for n := range y {
+ z[n] = true
+ }
+ // difference
+ z = make(map[int]bool)
+ for n := range y {
+ if !x[n] {
+ z[n] = true
+ }
+ }
+ }
+}
+
+func BenchmarkAppendTo(b *testing.B) {
+ prng := rand.New(rand.NewSource(0))
+ var x intsets.Sparse
+ for i := 0; i < 1000; i++ {
+ x.Insert(int(prng.Int()) % 10000)
+ }
+ var space [1000]int
+ for tries := 0; tries < b.N; tries++ {
+ x.AppendTo(space[:0])
+ }
+}
diff --git a/llgo/third_party/go.tools/container/intsets/util.go b/llgo/third_party/go.tools/container/intsets/util.go
new file mode 100644
index 0000000000000000000000000000000000000000..76e682cf4d9853c3213dacce0c955e2babae4bf4
--- /dev/null
+++ b/llgo/third_party/go.tools/container/intsets/util.go
@@ -0,0 +1,75 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package intsets
+
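+// a[i] holds the population count (number of set bits) of the byte value i;
+// it is filled in by init below.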
+var a [1 << 8]byte
+
+func init() {
+ for i := range a {
+ var n byte
+ for x := i; x != 0; x >>= 1 {
+ if x&1 != 0 {
+ n++
+ }
+ }
+ a[i] = n
+ }
+}
+
+// popcount returns the population count (number of set bits) of x.
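+// For example, popcount(0x0F0F) sums a[0x0F] + a[0x0F] over the two low-order
+// bytes (the remaining bytes are zero), giving 4 + 4 = 8 set bits.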
+func popcount(x word) int {
+ return int(a[byte(x>>(0*8))] +
+ a[byte(x>>(1*8))] +
+ a[byte(x>>(2*8))] +
+ a[byte(x>>(3*8))] +
+ a[byte(x>>(4*8))] +
+ a[byte(x>>(5*8))] +
+ a[byte(x>>(6*8))] +
+ a[byte(x>>(7*8))])
+}
+
+// nlz returns the number of leading zeros of x.
+// From Hacker's Delight, fig 5.11.
+func nlz(x word) int {
+ x |= (x >> 1)
+ x |= (x >> 2)
+ x |= (x >> 4)
+ x |= (x >> 8)
+ x |= (x >> 16)
+ x |= (x >> 32)
+ return popcount(^x)
+}
+
+// ntz returns the number of trailing zeros of x.
+// From Hacker's Delight, fig 5.13.
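+// For example, ntz(0x10): only the 4-bit test fires, leaving n = 5 and x = 1,
+// so the result is 5 - 1 = 4 trailing zeros.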
+func ntz(x word) int {
+ if x == 0 {
+ return bitsPerWord
+ }
+ n := 1
+ if bitsPerWord == 64 {
+ if (x & 0xffffffff) == 0 {
+ n = n + 32
+ x = x >> 32
+ }
+ }
+ if (x & 0x0000ffff) == 0 {
+ n = n + 16
+ x = x >> 16
+ }
+ if (x & 0x000000ff) == 0 {
+ n = n + 8
+ x = x >> 8
+ }
+ if (x & 0x0000000f) == 0 {
+ n = n + 4
+ x = x >> 4
+ }
+ if (x & 0x00000003) == 0 {
+ n = n + 2
+ x = x >> 2
+ }
+ return n - int(x&1)
+}
diff --git a/llgo/third_party/go.tools/container/intsets/util_test.go b/llgo/third_party/go.tools/container/intsets/util_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..92a4bc58bc4ec01533ffc0df8769792628e41d56
--- /dev/null
+++ b/llgo/third_party/go.tools/container/intsets/util_test.go
@@ -0,0 +1,25 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package intsets
+
+import "testing"
+
+func TestNLZ(t *testing.T) {
+ // Test the platform-specific edge case.
+ // NB: v must be a var (not const) so that the word() conversion is dynamic.
+ // Otherwise the compiler will report an error.
+ v := uint64(0x0000801000000000)
+ n := nlz(word(v))
+ want := 32 // (on 32-bit)
+ if bitsPerWord == 64 {
+ want = 16
+ }
+ if n != want {
+ t.Errorf("%d-bit nlz(%d) = %d, want %d", bitsPerWord, v, n, want)
+ }
+}
+
+// Backdoor for testing.
+func (s *Sparse) Check() error { return s.check() }
diff --git a/llgo/third_party/go.tools/cover/profile.go b/llgo/third_party/go.tools/cover/profile.go
new file mode 100644
index 0000000000000000000000000000000000000000..1cbd7398827ecb78034329838b61969c5d1540d5
--- /dev/null
+++ b/llgo/third_party/go.tools/cover/profile.go
@@ -0,0 +1,190 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package cover provides support for parsing coverage profiles
+// generated by "go test -coverprofile=cover.out".
+package cover
+
+import (
+ "bufio"
+ "fmt"
+ "math"
+ "os"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+)
+
+// Profile represents the profiling data for a specific file.
+type Profile struct {
+ FileName string
+ Mode string
+ Blocks []ProfileBlock
+}
+
+// ProfileBlock represents a single block of profiling data.
+type ProfileBlock struct {
+ StartLine, StartCol int
+ EndLine, EndCol int
+ NumStmt, Count int
+}
+
+type byFileName []*Profile
+
+func (p byFileName) Len() int { return len(p) }
+func (p byFileName) Less(i, j int) bool { return p[i].FileName < p[j].FileName }
+func (p byFileName) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+// ParseProfiles parses profile data in the specified file and returns a
+// Profile for each source file described therein.
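+//
+// A minimal usage sketch (the "cover.out" file name is just an example,
+// as produced by "go test -coverprofile=cover.out"):
+//
+//    profiles, err := cover.ParseProfiles("cover.out")
+//    if err != nil {
+//        log.Fatal(err)
+//    }
+//    for _, p := range profiles {
+//        covered, total := 0, 0
+//        for _, b := range p.Blocks {
+//            total += b.NumStmt
+//            if b.Count > 0 {
+//                covered += b.NumStmt
+//            }
+//        }
+//        fmt.Printf("%s: %d/%d statements covered\n", p.FileName, covered, total)
+//    }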
+func ParseProfiles(fileName string) ([]*Profile, error) {
+ pf, err := os.Open(fileName)
+ if err != nil {
+ return nil, err
+ }
+ defer pf.Close()
+
+ files := make(map[string]*Profile)
+ buf := bufio.NewReader(pf)
+ // First line is "mode: foo", where foo is "set", "count", or "atomic".
+ // Rest of file is in the format
+ // encoding/base64/base64.go:34.44,37.40 3 1
+ // where the fields are: name.go:line.column,line.column numberOfStatements count
+ s := bufio.NewScanner(buf)
+ mode := ""
+ for s.Scan() {
+ line := s.Text()
+ if mode == "" {
+ const p = "mode: "
+ if !strings.HasPrefix(line, p) || line == p {
+ return nil, fmt.Errorf("bad mode line: %v", line)
+ }
+ mode = line[len(p):]
+ continue
+ }
+ m := lineRe.FindStringSubmatch(line)
+ if m == nil {
+ return nil, fmt.Errorf("line %q doesn't match expected format: %v", m, lineRe)
+ }
+ fn := m[1]
+ p := files[fn]
+ if p == nil {
+ p = &Profile{
+ FileName: fn,
+ Mode: mode,
+ }
+ files[fn] = p
+ }
+ p.Blocks = append(p.Blocks, ProfileBlock{
+ StartLine: toInt(m[2]),
+ StartCol: toInt(m[3]),
+ EndLine: toInt(m[4]),
+ EndCol: toInt(m[5]),
+ NumStmt: toInt(m[6]),
+ Count: toInt(m[7]),
+ })
+ }
+ if err := s.Err(); err != nil {
+ return nil, err
+ }
+ for _, p := range files {
+ sort.Sort(blocksByStart(p.Blocks))
+ }
+ // Generate a sorted slice.
+ profiles := make([]*Profile, 0, len(files))
+ for _, profile := range files {
+ profiles = append(profiles, profile)
+ }
+ sort.Sort(byFileName(profiles))
+ return profiles, nil
+}
+
+type blocksByStart []ProfileBlock
+
+func (b blocksByStart) Len() int { return len(b) }
+func (b blocksByStart) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
+func (b blocksByStart) Less(i, j int) bool {
+ bi, bj := b[i], b[j]
+ return bi.StartLine < bj.StartLine || bi.StartLine == bj.StartLine && bi.StartCol < bj.StartCol
+}
+
+var lineRe = regexp.MustCompile(`^(.+):([0-9]+).([0-9]+),([0-9]+).([0-9]+) ([0-9]+) ([0-9]+)$`)
+
+func toInt(s string) int {
+ i, err := strconv.Atoi(s)
+ if err != nil {
+ panic(err)
+ }
+ return i
+}
+
+// Boundary represents the position in a source file of the beginning or end of a
+// block as reported by the coverage profile. In HTML mode, it will correspond to
+// the opening or closing of a <span> tag and will be used to colorize the source.
+type Boundary struct {
+ Offset int // Location as a byte offset in the source file.
+ Start bool // Is this the start of a block?
+ Count int // Event count from the cover profile.
+ Norm float64 // Count normalized to [0..1].
+}
+
+// Boundaries returns a Profile as a set of Boundary objects within the provided src.
+func (p *Profile) Boundaries(src []byte) (boundaries []Boundary) {
+ // Find maximum count.
+ max := 0
+ for _, b := range p.Blocks {
+ if b.Count > max {
+ max = b.Count
+ }
+ }
+ // Divisor for normalization.
+ divisor := math.Log(float64(max))
+
+ // boundary returns a Boundary, populating the Norm field with a normalized Count.
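+ // For example, if the maximum count in the profile is 100, a block with
+ // count 10 gets Norm = log(10)/log(100) = 0.5, and the hottest block gets 1.0.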
+ boundary := func(offset int, start bool, count int) Boundary {
+ b := Boundary{Offset: offset, Start: start, Count: count}
+ if !start || count == 0 {
+ return b
+ }
+ if max <= 1 {
+ b.Norm = 0.8 // Profile is in "set" mode; we want a heat map. Use cov8 in the CSS.
+ } else if count > 0 {
+ b.Norm = math.Log(float64(count)) / divisor
+ }
+ return b
+ }
+
+ line, col := 1, 2 // TODO: Why is this 2?
+ for si, bi := 0, 0; si < len(src) && bi < len(p.Blocks); {
+ b := p.Blocks[bi]
+ if b.StartLine == line && b.StartCol == col {
+ boundaries = append(boundaries, boundary(si, true, b.Count))
+ }
+ if b.EndLine == line && b.EndCol == col {
+ boundaries = append(boundaries, boundary(si, false, 0))
+ bi++
+ continue // Don't advance through src; maybe the next block starts here.
+ }
+ if src[si] == '\n' {
+ line++
+ col = 0
+ }
+ col++
+ si++
+ }
+ sort.Sort(boundariesByPos(boundaries))
+ return
+}
+
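+// boundariesByPos orders boundaries by offset; at equal offsets, a block end
+// sorts before a block start.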
+type boundariesByPos []Boundary
+
+func (b boundariesByPos) Len() int { return len(b) }
+func (b boundariesByPos) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
+func (b boundariesByPos) Less(i, j int) bool {
+ if b[i].Offset == b[j].Offset {
+ return !b[i].Start && b[j].Start
+ }
+ return b[i].Offset < b[j].Offset
+}
diff --git a/llgo/third_party/go.tools/dashboard/README b/llgo/third_party/go.tools/dashboard/README
new file mode 100644
index 0000000000000000000000000000000000000000..12244509c0ae815a0656800f755020b97c33ca77
--- /dev/null
+++ b/llgo/third_party/go.tools/dashboard/README
@@ -0,0 +1,32 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+The files in this directory constitute the continuous builder:
+
+app/: an AppEngine server. The code that runs http://build.golang.org/
+builder/: gobuilder, a Go continuous build client
+coordinator/: daemon that runs on CoreOS on Google Compute Engine and manages
+ builds (using the builder in single-shot mode) in Docker containers.
+env/: configuration files describing the environment of builders.
+ Many builders are still configured ad-hoc.
+watcher/: a daemon that watches for new commits to the Go repository and
+ its sub-repositories, and notifies the dashboard of those commits.
+
+If you wish to run a Go builder, please email golang-dev@googlegroups.com.
+
+To run a builder:
+
+* Write the key ~gobuild/.gobuildkey
+ You need to get it from someone who knows the key.
+ You may also use a filename of the form .gobuildkey-$BUILDER if you
+ wish to run builders for multiple targets.
+
+* Append your googlecode.com credentials (username and password) from
+ https://code.google.com/hosting/settings
+ to the buildkey file in the format "Username\nPassword\n".
+ (This is for uploading tarballs to the project downloads section,
+ and is an optional step.)
+
+* Build and run gobuilder (see its documentation for command-line options).
+
diff --git a/llgo/third_party/go.tools/dashboard/app/app.yaml b/llgo/third_party/go.tools/dashboard/app/app.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..8424cd0cdd960b9d7b52f63b5331c31f350bb897
--- /dev/null
+++ b/llgo/third_party/go.tools/dashboard/app/app.yaml
@@ -0,0 +1,21 @@
+# Update with
+# google_appengine/appcfg.py [-V test-build] update .
+#
+# Using -V test-build will run as test-build.golang.org.
+
+application: golang-org
+version: build
+runtime: go
+api_version: go1
+
+handlers:
+- url: /static
+ static_dir: static
+- url: /(|gccgo/)log/.+
+ script: _go_app
+- url: /(|gccgo/)(|commit|packages|result|perf-result|tag|todo|perf|perfdetail|perfgraph|updatebenchmark)
+ script: _go_app
+- url: /(|gccgo/)(init|buildtest|key|perflearn|_ah/queue/go/delay)
+ script: _go_app
+ login: admin
+
diff --git a/llgo/third_party/go.tools/dashboard/app/build/build.go b/llgo/third_party/go.tools/dashboard/app/build/build.go
new file mode 100644
index 0000000000000000000000000000000000000000..90ca344bd810734ac6a32edcea13d6aa3c5ca24a
--- /dev/null
+++ b/llgo/third_party/go.tools/dashboard/app/build/build.go
@@ -0,0 +1,911 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build appengine
+
+package build
+
+import (
+ "bytes"
+ "compress/gzip"
+ "crypto/sha1"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ "appengine"
+ "appengine/datastore"
+
+ "cache"
+)
+
+const (
+ maxDatastoreStringLen = 500
+ PerfRunLength = 1024
+)
+
+// A Package describes a package that is listed on the dashboard.
+type Package struct {
+ Kind string // "subrepo", "external", or empty for the main Go tree
+ Name string
+ Path string // (empty for the main Go tree)
+ NextNum int // Num of the next head Commit
+}
+
+func (p *Package) String() string {
+ return fmt.Sprintf("%s: %q", p.Path, p.Name)
+}
+
+func (p *Package) Key(c appengine.Context) *datastore.Key {
+ key := p.Path
+ if key == "" {
+ key = "go"
+ }
+ return datastore.NewKey(c, "Package", key, 0, nil)
+}
+
+// LastCommit returns the most recent Commit for this Package.
+func (p *Package) LastCommit(c appengine.Context) (*Commit, error) {
+ var commits []*Commit
+ _, err := datastore.NewQuery("Commit").
+ Ancestor(p.Key(c)).
+ Order("-Time").
+ Limit(1).
+ GetAll(c, &commits)
+ if err != nil {
+ return nil, err
+ }
+ if len(commits) != 1 {
+ return nil, datastore.ErrNoSuchEntity
+ }
+ return commits[0], nil
+}
+
+// GetPackage fetches a Package by path from the datastore.
+func GetPackage(c appengine.Context, path string) (*Package, error) {
+ p := &Package{Path: path}
+ err := datastore.Get(c, p.Key(c), p)
+ if err == datastore.ErrNoSuchEntity {
+ return nil, fmt.Errorf("package %q not found", path)
+ }
+ return p, err
+}
+
+// A Commit describes an individual commit in a package.
+//
+// Each Commit entity is a descendant of its associated Package entity.
+// In other words, all Commits with the same PackagePath belong to the same
+// datastore entity group.
+type Commit struct {
+ PackagePath string // (empty for main repo commits)
+ Hash string
+ ParentHash string
+ Num int // Internal monotonic counter unique to this package.
+
+ User string
+ Desc string `datastore:",noindex"`
+ Time time.Time
+ NeedsBenchmarking bool
+ TryPatch bool
+
+ // ResultData is the Data string of each build Result for this Commit.
+ // For non-Go commits, only the Results for the current Go tip, weekly,
+ // and release Tags are stored here. This is purely de-normalized data.
+ // The complete data set is stored in Result entities.
+ ResultData []string `datastore:",noindex"`
+
+ // PerfResults holds a set of “builder|benchmark” tuples denoting
+ // what benchmarks have been executed on the commit.
+ PerfResults []string `datastore:",noindex"`
+
+ FailNotificationSent bool
+}
+
+func (com *Commit) Key(c appengine.Context) *datastore.Key {
+ if com.Hash == "" {
+ panic("tried Key on Commit with empty Hash")
+ }
+ p := Package{Path: com.PackagePath}
+ key := com.PackagePath + "|" + com.Hash
+ return datastore.NewKey(c, "Commit", key, 0, p.Key(c))
+}
+
+func (c *Commit) Valid() error {
+ if !validHash(c.Hash) {
+ return errors.New("invalid Hash")
+ }
+ if c.ParentHash != "" && !validHash(c.ParentHash) { // empty is OK
+ return errors.New("invalid ParentHash")
+ }
+ return nil
+}
+
+func putCommit(c appengine.Context, com *Commit) error {
+ if err := com.Valid(); err != nil {
+ return fmt.Errorf("putting Commit: %v", err)
+ }
+ if com.Num == 0 && com.ParentHash != "0000" { // 0000 is used in tests
+ return fmt.Errorf("putting Commit: invalid Num (must be > 0)")
+ }
+ if _, err := datastore.Put(c, com.Key(c), com); err != nil {
+ return fmt.Errorf("putting Commit: %v", err)
+ }
+ return nil
+}
+
+// Each result line is approx 105 bytes. This constant is a tradeoff between
+// build history and the AppEngine datastore limit of 1 MB.
+const maxResults = 1000
+
+// AddResult adds the denormalized Result data to the Commit's Result field.
+// It must be called from inside a datastore transaction.
+func (com *Commit) AddResult(c appengine.Context, r *Result) error {
+ if err := datastore.Get(c, com.Key(c), com); err != nil {
+ return fmt.Errorf("getting Commit: %v", err)
+ }
+
+ var resultExists bool
+ for i, s := range com.ResultData {
+ // if there already exists result data for this builder at com, overwrite it.
+ if strings.HasPrefix(s, r.Builder+"|") && strings.HasSuffix(s, "|"+r.GoHash) {
+ resultExists = true
+ com.ResultData[i] = r.Data()
+ }
+ }
+ if !resultExists {
+ // otherwise, add the new result data for this builder.
+ com.ResultData = trim(append(com.ResultData, r.Data()), maxResults)
+ }
+ return putCommit(c, com)
+}
+
+// AddPerfResult remembers that the builder has run the benchmark on the commit.
+// It must be called from inside a datastore transaction.
+func (com *Commit) AddPerfResult(c appengine.Context, builder, benchmark string) error {
+ if err := datastore.Get(c, com.Key(c), com); err != nil {
+ return fmt.Errorf("getting Commit: %v", err)
+ }
+ if !com.NeedsBenchmarking {
+ return fmt.Errorf("trying to add perf result to Commit(%v) that does not require benchmarking", com.Hash)
+ }
+ s := builder + "|" + benchmark
+ for _, v := range com.PerfResults {
+ if v == s {
+ return nil
+ }
+ }
+ com.PerfResults = append(com.PerfResults, s)
+ return putCommit(c, com)
+}
+
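+// trim returns at most the last n elements of s.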
+func trim(s []string, n int) []string {
+ l := min(len(s), n)
+ return s[len(s)-l:]
+}
+
+func min(a, b int) int {
+ if a < b {
+ return a
+ }
+ return b
+}
+
+// Result returns the build Result for this Commit for the given builder/goHash.
+func (c *Commit) Result(builder, goHash string) *Result {
+ for _, r := range c.ResultData {
+ p := strings.SplitN(r, "|", 4)
+ if len(p) != 4 || p[0] != builder || p[3] != goHash {
+ continue
+ }
+ return partsToHash(c, p)
+ }
+ return nil
+}
+
+// Results returns the build Results for this Commit.
+func (c *Commit) Results() (results []*Result) {
+ for _, r := range c.ResultData {
+ p := strings.SplitN(r, "|", 4)
+ if len(p) != 4 {
+ continue
+ }
+ results = append(results, partsToHash(c, p))
+ }
+ return
+}
+
+func (c *Commit) ResultGoHashes() []string {
+ // For the main repo, just return the empty string
+ // (there's no corresponding main repo hash for a main repo Commit).
+ // This function is only really useful for sub-repos.
+ if c.PackagePath == "" {
+ return []string{""}
+ }
+ var hashes []string
+ for _, r := range c.ResultData {
+ p := strings.SplitN(r, "|", 4)
+ if len(p) != 4 {
+ continue
+ }
+ // Append only new results (use linear scan to preserve order).
+ if !contains(hashes, p[3]) {
+ hashes = append(hashes, p[3])
+ }
+ }
+ // Return results in reverse order (newest first).
+ reverse(hashes)
+ return hashes
+}
+
+func contains(t []string, s string) bool {
+ for _, s2 := range t {
+ if s2 == s {
+ return true
+ }
+ }
+ return false
+}
+
+func reverse(s []string) {
+ for i := 0; i < len(s)/2; i++ {
+ j := len(s) - i - 1
+ s[i], s[j] = s[j], s[i]
+ }
+}
+
+// A CommitRun provides summary information for commits [StartCommitNum, StartCommitNum + PerfRunLength).
+// Descendant of Package.
+type CommitRun struct {
+ PackagePath string // (empty for main repo commits)
+ StartCommitNum int
+ Hash []string `datastore:",noindex"`
+ User []string `datastore:",noindex"`
+ Desc []string `datastore:",noindex"` // Only first line.
+ Time []time.Time `datastore:",noindex"`
+ NeedsBenchmarking []bool `datastore:",noindex"`
+}
+
+func (cr *CommitRun) Key(c appengine.Context) *datastore.Key {
+ p := Package{Path: cr.PackagePath}
+ key := strconv.Itoa(cr.StartCommitNum)
+ return datastore.NewKey(c, "CommitRun", key, 0, p.Key(c))
+}
+
+// GetCommitRun loads and returns CommitRun that contains information
+// for commit commitNum.
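+// For example, with PerfRunLength = 1024, commit 2500 belongs to the
+// CommitRun whose StartCommitNum is 2048.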
+func GetCommitRun(c appengine.Context, commitNum int) (*CommitRun, error) {
+ cr := &CommitRun{StartCommitNum: commitNum / PerfRunLength * PerfRunLength}
+ err := datastore.Get(c, cr.Key(c), cr)
+ if err != nil && err != datastore.ErrNoSuchEntity {
+ return nil, fmt.Errorf("getting CommitRun: %v", err)
+ }
+ if len(cr.Hash) != PerfRunLength {
+ cr.Hash = make([]string, PerfRunLength)
+ cr.User = make([]string, PerfRunLength)
+ cr.Desc = make([]string, PerfRunLength)
+ cr.Time = make([]time.Time, PerfRunLength)
+ cr.NeedsBenchmarking = make([]bool, PerfRunLength)
+ }
+ return cr, nil
+}
+
+func (cr *CommitRun) AddCommit(c appengine.Context, com *Commit) error {
+ if com.Num < cr.StartCommitNum || com.Num >= cr.StartCommitNum+PerfRunLength {
+ return fmt.Errorf("AddCommit: commit num %v out of range [%v, %v)",
+ com.Num, cr.StartCommitNum, cr.StartCommitNum+PerfRunLength)
+ }
+ i := com.Num - cr.StartCommitNum
+ // Be careful with string lengths,
+ // we need to fit 1024 commits into 1 MB.
+ cr.Hash[i] = com.Hash
+ cr.User[i] = shortDesc(com.User)
+ cr.Desc[i] = shortDesc(com.Desc)
+ cr.Time[i] = com.Time
+ cr.NeedsBenchmarking[i] = com.NeedsBenchmarking
+ if _, err := datastore.Put(c, cr.Key(c), cr); err != nil {
+ return fmt.Errorf("putting CommitRun: %v", err)
+ }
+ return nil
+}
+
+// GetCommits returns [startCommitNum, startCommitNum+n) commits.
+// Commits information is partial (obtained from CommitRun),
+// do not store them back into datastore.
+func GetCommits(c appengine.Context, startCommitNum, n int) ([]*Commit, error) {
+ if startCommitNum < 0 || n <= 0 {
+ return nil, fmt.Errorf("GetCommits: invalid args (%v, %v)", startCommitNum, n)
+ }
+
+ p := &Package{}
+ t := datastore.NewQuery("CommitRun").
+ Ancestor(p.Key(c)).
+ Filter("StartCommitNum >=", startCommitNum/PerfRunLength*PerfRunLength).
+ Order("StartCommitNum").
+ Limit(100).
+ Run(c)
+
+ res := make([]*Commit, n)
+ for {
+ cr := new(CommitRun)
+ _, err := t.Next(cr)
+ if err == datastore.Done {
+ break
+ }
+ if err != nil {
+ return nil, err
+ }
+ if cr.StartCommitNum >= startCommitNum+n {
+ break
+ }
+ // Calculate start index for copying.
+ i := 0
+ if cr.StartCommitNum < startCommitNum {
+ i = startCommitNum - cr.StartCommitNum
+ }
+ // Calculate end index for copying.
+ e := PerfRunLength
+ if cr.StartCommitNum+e > startCommitNum+n {
+ e = startCommitNum + n - cr.StartCommitNum
+ }
+ for ; i < e; i++ {
+ com := new(Commit)
+ com.Hash = cr.Hash[i]
+ com.User = cr.User[i]
+ com.Desc = cr.Desc[i]
+ com.Time = cr.Time[i]
+ com.NeedsBenchmarking = cr.NeedsBenchmarking[i]
+ res[cr.StartCommitNum-startCommitNum+i] = com
+ }
+ if e != PerfRunLength {
+ break
+ }
+ }
+ return res, nil
+}
+
+// partsToHash converts a Commit and ResultData substrings to a Result.
+func partsToHash(c *Commit, p []string) *Result {
+ return &Result{
+ Builder: p[0],
+ Hash: c.Hash,
+ PackagePath: c.PackagePath,
+ GoHash: p[3],
+ OK: p[1] == "true",
+ LogHash: p[2],
+ }
+}
+
+// A Result describes a build result for a Commit on an OS/architecture.
+//
+// Each Result entity is a descendant of its associated Package entity.
+type Result struct {
+ PackagePath string // (empty for Go commits)
+ Builder string // "os-arch[-note]"
+ Hash string
+
+ // The Go Commit this was built against (empty for Go commits).
+ GoHash string
+
+ OK bool
+ Log string `datastore:"-"` // for JSON unmarshaling only
+ LogHash string `datastore:",noindex"` // Key to the Log record.
+
+ RunTime int64 // time to build+test in nanoseconds
+}
+
+func (r *Result) Key(c appengine.Context) *datastore.Key {
+ p := Package{Path: r.PackagePath}
+ key := r.Builder + "|" + r.PackagePath + "|" + r.Hash + "|" + r.GoHash
+ return datastore.NewKey(c, "Result", key, 0, p.Key(c))
+}
+
+func (r *Result) Valid() error {
+ if !validHash(r.Hash) {
+ return errors.New("invalid Hash")
+ }
+ if r.PackagePath != "" && !validHash(r.GoHash) {
+ return errors.New("invalid GoHash")
+ }
+ return nil
+}
+
+// Data returns the Result in string format
+// to be stored in Commit's ResultData field.
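+// For example, a successful build might be encoded as
+// "linux-amd64|true|<loghash>|<gohash>" (builder name and hashes illustrative).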
+func (r *Result) Data() string {
+ return fmt.Sprintf("%v|%v|%v|%v", r.Builder, r.OK, r.LogHash, r.GoHash)
+}
+
+// A PerfResult describes all benchmarking result for a Commit.
+// Descendant of Package.
+type PerfResult struct {
+ PackagePath string
+ CommitHash string
+ CommitNum int
+ Data []string `datastore:",noindex"` // "builder|benchmark|ok|metric1=val1|metric2=val2|file:log=hash|file:cpuprof=hash"
+
+ // Local cache with parsed Data.
+ // Maps builder->benchmark->ParsedPerfResult.
+ parsedData map[string]map[string]*ParsedPerfResult
+}
+
+type ParsedPerfResult struct {
+ OK bool
+ Metrics map[string]uint64
+ Artifacts map[string]string
+}
+
+func (r *PerfResult) Key(c appengine.Context) *datastore.Key {
+ p := Package{Path: r.PackagePath}
+ key := r.CommitHash
+ return datastore.NewKey(c, "PerfResult", key, 0, p.Key(c))
+}
+
+// AddResult adds the benchmarking result to r.
+// Any existing result for the same builder/benchmark is replaced.
+// Returns whether the result was already present.
+func (r *PerfResult) AddResult(req *PerfRequest) bool {
+ present := false
+ str := fmt.Sprintf("%v|%v|", req.Builder, req.Benchmark)
+ for i, s := range r.Data {
+ if strings.HasPrefix(s, str) {
+ present = true
+ last := len(r.Data) - 1
+ r.Data[i] = r.Data[last]
+ r.Data = r.Data[:last]
+ break
+ }
+ }
+ ok := "ok"
+ if !req.OK {
+ ok = "false"
+ }
+ str += ok
+ for _, m := range req.Metrics {
+ str += fmt.Sprintf("|%v=%v", m.Type, m.Val)
+ }
+ for _, a := range req.Artifacts {
+ str += fmt.Sprintf("|file:%v=%v", a.Type, a.Body)
+ }
+ r.Data = append(r.Data, str)
+ r.parsedData = nil
+ return present
+}
+
+func (r *PerfResult) ParseData() map[string]map[string]*ParsedPerfResult {
+ if r.parsedData != nil {
+ return r.parsedData
+ }
+ res := make(map[string]map[string]*ParsedPerfResult)
+ for _, str := range r.Data {
+ ss := strings.Split(str, "|")
+ builder := ss[0]
+ bench := ss[1]
+ ok := ss[2]
+ m := res[builder]
+ if m == nil {
+ m = make(map[string]*ParsedPerfResult)
+ res[builder] = m
+ }
+ var p ParsedPerfResult
+ p.OK = ok == "ok"
+ p.Metrics = make(map[string]uint64)
+ p.Artifacts = make(map[string]string)
+ for _, entry := range ss[3:] {
+ if strings.HasPrefix(entry, "file:") {
+ ss1 := strings.Split(entry[len("file:"):], "=")
+ p.Artifacts[ss1[0]] = ss1[1]
+ } else {
+ ss1 := strings.Split(entry, "=")
+ val, _ := strconv.ParseUint(ss1[1], 10, 64)
+ p.Metrics[ss1[0]] = val
+ }
+ }
+ m[bench] = &p
+ }
+ r.parsedData = res
+ return res
+}
+
+// A PerfMetricRun entity holds a set of metric values for builder/benchmark/metric
+// for commits [StartCommitNum, StartCommitNum + PerfRunLength).
+// Descendant of Package.
+type PerfMetricRun struct {
+ PackagePath string
+ Builder string
+ Benchmark string
+ Metric string // e.g. realtime, cputime, gc-pause
+ StartCommitNum int
+ Vals []int64 `datastore:",noindex"`
+}
+
+func (m *PerfMetricRun) Key(c appengine.Context) *datastore.Key {
+ p := Package{Path: m.PackagePath}
+ key := m.Builder + "|" + m.Benchmark + "|" + m.Metric + "|" + strconv.Itoa(m.StartCommitNum)
+ return datastore.NewKey(c, "PerfMetricRun", key, 0, p.Key(c))
+}
+
+// GetPerfMetricRun loads and returns PerfMetricRun that contains information
+// for commit commitNum.
+func GetPerfMetricRun(c appengine.Context, builder, benchmark, metric string, commitNum int) (*PerfMetricRun, error) {
+ startCommitNum := commitNum / PerfRunLength * PerfRunLength
+ m := &PerfMetricRun{Builder: builder, Benchmark: benchmark, Metric: metric, StartCommitNum: startCommitNum}
+ err := datastore.Get(c, m.Key(c), m)
+ if err != nil && err != datastore.ErrNoSuchEntity {
+ return nil, fmt.Errorf("getting PerfMetricRun: %v", err)
+ }
+ if len(m.Vals) != PerfRunLength {
+ m.Vals = make([]int64, PerfRunLength)
+ }
+ return m, nil
+}
+
+func (m *PerfMetricRun) AddMetric(c appengine.Context, commitNum int, v uint64) error {
+ if commitNum < m.StartCommitNum || commitNum >= m.StartCommitNum+PerfRunLength {
+ return fmt.Errorf("AddMetric: CommitNum %v out of range [%v, %v)",
+ commitNum, m.StartCommitNum, m.StartCommitNum+PerfRunLength)
+ }
+ m.Vals[commitNum-m.StartCommitNum] = int64(v)
+ if _, err := datastore.Put(c, m.Key(c), m); err != nil {
+ return fmt.Errorf("putting PerfMetricRun: %v", err)
+ }
+ return nil
+}
+
+// GetPerfMetricsForCommits returns perf metrics for builder/benchmark/metric
+// and commits [startCommitNum, startCommitNum+n).
+func GetPerfMetricsForCommits(c appengine.Context, builder, benchmark, metric string, startCommitNum, n int) ([]uint64, error) {
+ if startCommitNum < 0 || n <= 0 {
+ return nil, fmt.Errorf("GetPerfMetricsForCommits: invalid args (%v, %v)", startCommitNum, n)
+ }
+
+ p := &Package{}
+ t := datastore.NewQuery("PerfMetricRun").
+ Ancestor(p.Key(c)).
+ Filter("Builder =", builder).
+ Filter("Benchmark =", benchmark).
+ Filter("Metric =", metric).
+ Filter("StartCommitNum >=", startCommitNum/PerfRunLength*PerfRunLength).
+ Order("StartCommitNum").
+ Limit(100).
+ Run(c)
+
+ res := make([]uint64, n)
+ for {
+ metrics := new(PerfMetricRun)
+ _, err := t.Next(metrics)
+ if err == datastore.Done {
+ break
+ }
+ if err != nil {
+ return nil, err
+ }
+ if metrics.StartCommitNum >= startCommitNum+n {
+ break
+ }
+ // Calculate start index for copying.
+ i := 0
+ if metrics.StartCommitNum < startCommitNum {
+ i = startCommitNum - metrics.StartCommitNum
+ }
+ // Calculate end index for copying.
+ e := PerfRunLength
+ if metrics.StartCommitNum+e > startCommitNum+n {
+ e = startCommitNum + n - metrics.StartCommitNum
+ }
+ for ; i < e; i++ {
+ res[metrics.StartCommitNum-startCommitNum+i] = uint64(metrics.Vals[i])
+ }
+ if e != PerfRunLength {
+ break
+ }
+ }
+ return res, nil
+}
+
+// PerfConfig holds read-mostly configuration related to benchmarking.
+// There is only one PerfConfig entity.
+type PerfConfig struct {
+ BuilderBench []string `datastore:",noindex"` // "builder|benchmark" pairs
+ BuilderProcs []string `datastore:",noindex"` // "builder|proc" pairs
+ BenchMetric []string `datastore:",noindex"` // "benchmark|metric" pairs
+ NoiseLevels []string `datastore:",noindex"` // "builder|benchmark|metric1=noise1|metric2=noise2"
+
+ // Local cache of "builder|benchmark|metric" -> noise.
+ noise map[string]float64
+}
+
+func PerfConfigKey(c appengine.Context) *datastore.Key {
+ p := Package{}
+ return datastore.NewKey(c, "PerfConfig", "PerfConfig", 0, p.Key(c))
+}
+
+const perfConfigCacheKey = "perf-config"
+
+func GetPerfConfig(c appengine.Context, r *http.Request) (*PerfConfig, error) {
+ pc := new(PerfConfig)
+ now := cache.Now(c)
+ if cache.Get(r, now, perfConfigCacheKey, pc) {
+ return pc, nil
+ }
+ err := datastore.Get(c, PerfConfigKey(c), pc)
+ if err != nil && err != datastore.ErrNoSuchEntity {
+ return nil, fmt.Errorf("GetPerfConfig: %v", err)
+ }
+ cache.Set(r, now, perfConfigCacheKey, pc)
+ return pc, nil
+}
+
+func (pc *PerfConfig) NoiseLevel(builder, benchmark, metric string) float64 {
+ if pc.noise == nil {
+ pc.noise = make(map[string]float64)
+ for _, str := range pc.NoiseLevels {
+ split := strings.Split(str, "|")
+ builderBench := split[0] + "|" + split[1]
+ for _, entry := range split[2:] {
+ metricValue := strings.Split(entry, "=")
+ noise, _ := strconv.ParseFloat(metricValue[1], 64)
+ pc.noise[builderBench+"|"+metricValue[0]] = noise
+ }
+ }
+ }
+ me := fmt.Sprintf("%v|%v|%v", builder, benchmark, metric)
+ n := pc.noise[me]
+ if n == 0 {
+ // Use a very conservative value
+ // until we have learned the real noise level.
+ n = 200
+ }
+ return n
+}
+
+// UpdatePerfConfig updates the PerfConfig entity with results of benchmarking.
+// Returns whether it's a benchmark that we have not yet seen on the builder.
+func UpdatePerfConfig(c appengine.Context, r *http.Request, req *PerfRequest) (newBenchmark bool, err error) {
+ pc, err := GetPerfConfig(c, r)
+ if err != nil {
+ return false, err
+ }
+
+ modified := false
+ add := func(arr *[]string, str string) {
+ for _, s := range *arr {
+ if s == str {
+ return
+ }
+ }
+ *arr = append(*arr, str)
+ modified = true
+ return
+ }
+
+ BenchProcs := strings.Split(req.Benchmark, "-")
+ benchmark := BenchProcs[0]
+ procs := "1"
+ if len(BenchProcs) > 1 {
+ procs = BenchProcs[1]
+ }
+
+ add(&pc.BuilderBench, req.Builder+"|"+benchmark)
+ newBenchmark = modified
+ add(&pc.BuilderProcs, req.Builder+"|"+procs)
+ for _, m := range req.Metrics {
+ add(&pc.BenchMetric, benchmark+"|"+m.Type)
+ }
+
+ if modified {
+ if _, err := datastore.Put(c, PerfConfigKey(c), pc); err != nil {
+ return false, fmt.Errorf("putting PerfConfig: %v", err)
+ }
+ cache.Tick(c)
+ }
+ return newBenchmark, nil
+}
+
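+// MetricList sorts metric names alphabetically, except that names with the
+// "build-" or "binary-" prefix sort after all the others.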
+type MetricList []string
+
+func (l MetricList) Len() int {
+ return len(l)
+}
+
+func (l MetricList) Less(i, j int) bool {
+ bi := strings.HasPrefix(l[i], "build-") || strings.HasPrefix(l[i], "binary-")
+ bj := strings.HasPrefix(l[j], "build-") || strings.HasPrefix(l[j], "binary-")
+ if bi == bj {
+ return l[i] < l[j]
+ }
+ return !bi
+}
+
+func (l MetricList) Swap(i, j int) {
+ l[i], l[j] = l[j], l[i]
+}
+
+func collectList(all []string, idx int, second string) (res []string) {
+ m := make(map[string]bool)
+ for _, str := range all {
+ ss := strings.Split(str, "|")
+ v := ss[idx]
+ v2 := ss[1-idx]
+ if (second == "" || second == v2) && !m[v] {
+ m[v] = true
+ res = append(res, v)
+ }
+ }
+ sort.Sort(MetricList(res))
+ return res
+}
+
+func (pc *PerfConfig) BuildersForBenchmark(bench string) []string {
+ return collectList(pc.BuilderBench, 0, bench)
+}
+
+func (pc *PerfConfig) BenchmarksForBuilder(builder string) []string {
+ return collectList(pc.BuilderBench, 1, builder)
+}
+
+func (pc *PerfConfig) MetricsForBenchmark(bench string) []string {
+ return collectList(pc.BenchMetric, 1, bench)
+}
+
+func (pc *PerfConfig) BenchmarkProcList() (res []string) {
+ bl := pc.BenchmarksForBuilder("")
+ pl := pc.ProcList("")
+ for _, b := range bl {
+ for _, p := range pl {
+ res = append(res, fmt.Sprintf("%v-%v", b, p))
+ }
+ }
+ return res
+}
+
+func (pc *PerfConfig) ProcList(builder string) []int {
+ ss := collectList(pc.BuilderProcs, 1, builder)
+ var procs []int
+ for _, s := range ss {
+ p, _ := strconv.ParseInt(s, 10, 32)
+ procs = append(procs, int(p))
+ }
+ sort.Ints(procs)
+ return procs
+}
+
+// A PerfTodo contains outstanding commits for benchmarking for a builder.
+// Descendant of Package.
+type PerfTodo struct {
+ PackagePath string // (empty for main repo commits)
+ Builder string
+ CommitNums []int `datastore:",noindex"` // LIFO queue of commits to benchmark.
+}
+
+func (todo *PerfTodo) Key(c appengine.Context) *datastore.Key {
+ p := Package{Path: todo.PackagePath}
+ key := todo.Builder
+ return datastore.NewKey(c, "PerfTodo", key, 0, p.Key(c))
+}
+
+// AddCommitToPerfTodo adds the commit to all existing PerfTodo entities.
+func AddCommitToPerfTodo(c appengine.Context, com *Commit) error {
+ var todos []*PerfTodo
+ _, err := datastore.NewQuery("PerfTodo").
+ Ancestor((&Package{}).Key(c)).
+ GetAll(c, &todos)
+ if err != nil {
+ return fmt.Errorf("fetching PerfTodo's: %v", err)
+ }
+ for _, todo := range todos {
+ todo.CommitNums = append(todo.CommitNums, com.Num)
+ _, err = datastore.Put(c, todo.Key(c), todo)
+ if err != nil {
+ return fmt.Errorf("updating PerfTodo: %v", err)
+ }
+ }
+ return nil
+}
+
+// A Log is a gzip-compressed log file stored under the SHA1 hash of the
+// uncompressed log text.
+type Log struct {
+ CompressedLog []byte
+}
+
+func (l *Log) Text() ([]byte, error) {
+ d, err := gzip.NewReader(bytes.NewBuffer(l.CompressedLog))
+ if err != nil {
+ return nil, fmt.Errorf("reading log data: %v", err)
+ }
+ b, err := ioutil.ReadAll(d)
+ if err != nil {
+ return nil, fmt.Errorf("reading log data: %v", err)
+ }
+ return b, nil
+}
+
+func PutLog(c appengine.Context, text string) (hash string, err error) {
+ h := sha1.New()
+ io.WriteString(h, text)
+ b := new(bytes.Buffer)
+ z, _ := gzip.NewWriterLevel(b, gzip.BestCompression)
+ io.WriteString(z, text)
+ z.Close()
+ hash = fmt.Sprintf("%x", h.Sum(nil))
+ key := datastore.NewKey(c, "Log", hash, 0, nil)
+ _, err = datastore.Put(c, key, &Log{b.Bytes()})
+ return
+}
+
+// A Tag is used to keep track of the most recent Go weekly and release tags.
+// Typically there will be one Tag entity for each kind of hg tag.
+type Tag struct {
+ Kind string // "weekly", "release", or "tip"
+ Name string // the tag itself (for example: "release.r60")
+ Hash string
+}
+
+func (t *Tag) Key(c appengine.Context) *datastore.Key {
+ p := &Package{}
+ return datastore.NewKey(c, "Tag", t.Kind, 0, p.Key(c))
+}
+
+func (t *Tag) Valid() error {
+ if t.Kind != "weekly" && t.Kind != "release" && t.Kind != "tip" {
+ return errors.New("invalid Kind")
+ }
+ if !validHash(t.Hash) {
+ return errors.New("invalid Hash")
+ }
+ return nil
+}
+
+// Commit returns the Commit that corresponds with this Tag.
+func (t *Tag) Commit(c appengine.Context) (*Commit, error) {
+ com := &Commit{Hash: t.Hash}
+ err := datastore.Get(c, com.Key(c), com)
+ return com, err
+}
+
+// GetTag fetches a Tag by name from the datastore.
+func GetTag(c appengine.Context, tag string) (*Tag, error) {
+ t := &Tag{Kind: tag}
+ if err := datastore.Get(c, t.Key(c), t); err != nil {
+ if err == datastore.ErrNoSuchEntity {
+ return nil, errors.New("tag not found: " + tag)
+ }
+ return nil, err
+ }
+ if err := t.Valid(); err != nil {
+ return nil, err
+ }
+ return t, nil
+}
+
+// Packages returns packages of the specified kind.
+// Kind must be one of "external" or "subrepo".
+func Packages(c appengine.Context, kind string) ([]*Package, error) {
+ switch kind {
+ case "external", "subrepo":
+ default:
+ return nil, errors.New(`kind must be one of "external" or "subrepo"`)
+ }
+ var pkgs []*Package
+ q := datastore.NewQuery("Package").Filter("Kind=", kind)
+ for t := q.Run(c); ; {
+ pkg := new(Package)
+ _, err := t.Next(pkg)
+ if err == datastore.Done {
+ break
+ } else if err != nil {
+ return nil, err
+ }
+ if pkg.Path != "" {
+ pkgs = append(pkgs, pkg)
+ }
+ }
+ return pkgs, nil
+}
diff --git a/llgo/third_party/go.tools/dashboard/app/build/dash.go b/llgo/third_party/go.tools/dashboard/app/build/dash.go
new file mode 100644
index 0000000000000000000000000000000000000000..52ca74dad81f185376c9c8a60c53bce92262d5b0
--- /dev/null
+++ b/llgo/third_party/go.tools/dashboard/app/build/dash.go
@@ -0,0 +1,118 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build appengine
+
+package build
+
+import (
+ "net/http"
+ "strings"
+
+ "appengine"
+)
+
+// Dashboard describes a unique build dashboard.
+type Dashboard struct {
+ Name string // This dashboard's name and namespace
+ RelPath string // The relative url path
+ Packages []*Package // The project's packages to build
+}
+
+// dashboardForRequest returns the appropriate dashboard for a given URL path.
+func dashboardForRequest(r *http.Request) *Dashboard {
+ if strings.HasPrefix(r.URL.Path, gccgoDash.RelPath) {
+ return gccgoDash
+ }
+ return goDash
+}
+
+// Context returns a namespaced context for this dashboard, or panics if it
+// fails to create a new context.
+func (d *Dashboard) Context(c appengine.Context) appengine.Context {
+ // No namespace needed for the original Go dashboard.
+ if d.Name == "Go" {
+ return c
+ }
+ n, err := appengine.Namespace(c, d.Name)
+ if err != nil {
+ panic(err)
+ }
+ return n
+}
+
+// the currently known dashboards.
+var dashboards = []*Dashboard{goDash, gccgoDash}
+
+// goDash is the dashboard for the main go repository.
+var goDash = &Dashboard{
+ Name: "Go",
+ RelPath: "/",
+ Packages: goPackages,
+}
+
+// goPackages is a list of all of the packages built by the main go repository.
+var goPackages = []*Package{
+ {
+ Kind: "go",
+ Name: "Go",
+ },
+ {
+ Kind: "subrepo",
+ Name: "go.blog",
+ Path: "code.google.com/p/go.blog",
+ },
+ {
+ Kind: "subrepo",
+ Name: "go.codereview",
+ Path: "code.google.com/p/go.codereview",
+ },
+ {
+ Kind: "subrepo",
+ Name: "go.crypto",
+ Path: "code.google.com/p/go.crypto",
+ },
+ {
+ Kind: "subrepo",
+ Name: "go.exp",
+ Path: "code.google.com/p/go.exp",
+ },
+ {
+ Kind: "subrepo",
+ Name: "go.image",
+ Path: "code.google.com/p/go.image",
+ },
+ {
+ Kind: "subrepo",
+ Name: "go.net",
+ Path: "code.google.com/p/go.net",
+ },
+ {
+ Kind: "subrepo",
+ Name: "go.sys",
+ Path: "code.google.com/p/go.sys",
+ },
+ {
+ Kind: "subrepo",
+ Name: "go.talks",
+ Path: "code.google.com/p/go.talks",
+ },
+ {
+ Kind: "subrepo",
+ Name: "go.tools",
+ Path: "code.google.com/p/go.tools",
+ },
+}
+
+// gccgoDash is the dashboard for gccgo.
+var gccgoDash = &Dashboard{
+ Name: "Gccgo",
+ RelPath: "/gccgo/",
+ Packages: []*Package{
+ {
+ Kind: "gccgo",
+ Name: "Gccgo",
+ },
+ },
+}
diff --git a/llgo/third_party/go.tools/dashboard/app/build/handler.go b/llgo/third_party/go.tools/dashboard/app/build/handler.go
new file mode 100644
index 0000000000000000000000000000000000000000..5d06815c064898b42fd842f6acd1006b7c0c9cce
--- /dev/null
+++ b/llgo/third_party/go.tools/dashboard/app/build/handler.go
@@ -0,0 +1,906 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build appengine
+
+package build
+
+import (
+ "bytes"
+ "crypto/hmac"
+ "crypto/md5"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+
+ "appengine"
+ "appengine/datastore"
+
+ "cache"
+ "key"
+)
+
+const commitsPerPage = 30
+const watcherVersion = 2
+
+// commitHandler retrieves commit data or records a new commit.
+//
+// For GET requests it returns a Commit value for the specified
+// packagePath and hash.
+//
+// For POST requests it reads a JSON-encoded Commit value from the request
+// body and creates a new Commit entity. It also updates the "tip" Tag for
+// each new commit at tip.
+//
+// This handler is used by a gobuilder process in -commit mode.
+func commitHandler(r *http.Request) (interface{}, error) {
+ c := contextForRequest(r)
+ com := new(Commit)
+
+ if r.Method == "GET" {
+ com.PackagePath = r.FormValue("packagePath")
+ com.Hash = r.FormValue("hash")
+ err := datastore.Get(c, com.Key(c), com)
+ if com.Num == 0 && com.Desc == "" {
+ // Perf builder might have written an incomplete Commit.
+ // Pretend it doesn't exist, so that we can get complete details.
+ err = datastore.ErrNoSuchEntity
+ }
+ if err != nil {
+ if err == datastore.ErrNoSuchEntity {
+ // This error string is special.
+ // The commit watcher expects it.
+ // Do not change it.
+ return nil, errors.New("Commit not found")
+ }
+ return nil, fmt.Errorf("getting Commit: %v", err)
+ }
+ if com.Num == 0 {
+ // Corrupt state which shouldn't happen but does.
+ // Return an error so builders' commit loops will
+ // be willing to retry submitting this commit.
+ return nil, errors.New("in datastore with zero Num")
+ }
+ if com.Desc == "" || com.User == "" {
+ // Also shouldn't happen, but at least happened
+ // once on a single commit when trying to fix data
+ // in the datastore viewer UI?
+ return nil, errors.New("missing field")
+ }
+ // Strip potentially large and unnecessary fields.
+ com.ResultData = nil
+ com.PerfResults = nil
+ return com, nil
+ }
+ if r.Method != "POST" {
+ return nil, errBadMethod(r.Method)
+ }
+ if !isMasterKey(c, r.FormValue("key")) {
+ return nil, errors.New("can only POST commits with master key")
+ }
+
+ // For now, the commit watcher doesn't support gccgo,
+ // so only do this check for Go commits.
+ // TODO(adg,cmang): remove this check when gccgo is supported.
+ if dashboardForRequest(r) == goDash {
+ v, _ := strconv.Atoi(r.FormValue("version"))
+ if v != watcherVersion {
+ return nil, fmt.Errorf("rejecting POST from commit watcher; need version %v", watcherVersion)
+ }
+ }
+
+ // POST request
+ body, err := ioutil.ReadAll(r.Body)
+ r.Body.Close()
+ if err != nil {
+ return nil, fmt.Errorf("reading Body: %v", err)
+ }
+ if !bytes.Contains(body, needsBenchmarkingBytes) {
+ c.Warningf("old builder detected at %v", r.RemoteAddr)
+ return nil, fmt.Errorf("rejecting old builder request, body does not contain %s: %q", needsBenchmarkingBytes, body)
+ }
+ if err := json.Unmarshal(body, com); err != nil {
+ return nil, fmt.Errorf("unmarshaling body %q: %v", body, err)
+ }
+ com.Desc = limitStringLength(com.Desc, maxDatastoreStringLen)
+ if err := com.Valid(); err != nil {
+ return nil, fmt.Errorf("validating Commit: %v", err)
+ }
+ defer cache.Tick(c)
+ tx := func(c appengine.Context) error {
+ return addCommit(c, com)
+ }
+ return nil, datastore.RunInTransaction(c, tx, nil)
+}
+
+var needsBenchmarkingBytes = []byte(`"NeedsBenchmarking"`)
+
+// addCommit adds the Commit entity to the datastore and updates the tip Tag.
+// It must be run inside a datastore transaction.
+func addCommit(c appengine.Context, com *Commit) error {
+ var ec Commit // existing commit
+ isUpdate := false
+ err := datastore.Get(c, com.Key(c), &ec)
+ if err != nil && err != datastore.ErrNoSuchEntity {
+ return fmt.Errorf("getting Commit: %v", err)
+ }
+ if err == nil {
+ // Commit already in the datastore. Any fields different?
+ // If not, don't do anything.
+ changes := (com.Num != 0 && com.Num != ec.Num) ||
+ com.ParentHash != ec.ParentHash ||
+ com.Desc != ec.Desc ||
+ com.User != ec.User ||
+ !com.Time.Equal(ec.Time)
+ if !changes {
+ return nil
+ }
+ ec.ParentHash = com.ParentHash
+ ec.Desc = com.Desc
+ ec.User = com.User
+ if !com.Time.IsZero() {
+ ec.Time = com.Time
+ }
+ if com.Num != 0 {
+ ec.Num = com.Num
+ }
+ isUpdate = true
+ com = &ec
+ }
+ p, err := GetPackage(c, com.PackagePath)
+ if err != nil {
+ return fmt.Errorf("GetPackage: %v", err)
+ }
+ if com.Num == 0 {
+ // get the next commit number
+ com.Num = p.NextNum
+ p.NextNum++
+ if _, err := datastore.Put(c, p.Key(c), p); err != nil {
+ return fmt.Errorf("putting Package: %v", err)
+ }
+ } else if com.Num >= p.NextNum {
+ p.NextNum = com.Num + 1
+ if _, err := datastore.Put(c, p.Key(c), p); err != nil {
+ return fmt.Errorf("putting Package: %v", err)
+ }
+ }
+ // If this isn't the first Commit, check that the parent commit exists.
+ // The all-zeros hash is returned by hg's p1node template for parentless commits.
+ if com.ParentHash != "" && com.ParentHash != "0000000000000000000000000000000000000000" && com.ParentHash != "0000" {
+ n, err := datastore.NewQuery("Commit").
+ Filter("Hash =", com.ParentHash).
+ Ancestor(p.Key(c)).
+ Count(c)
+ if err != nil {
+ return fmt.Errorf("testing for parent Commit: %v", err)
+ }
+ if n == 0 {
+ return errors.New("parent commit not found")
+ }
+ }
+ // update the tip Tag if this is the Go repo and this isn't on a release branch
+ if p.Path == "" && !strings.HasPrefix(com.Desc, "[") && !isUpdate {
+ t := &Tag{Kind: "tip", Hash: com.Hash}
+ if _, err = datastore.Put(c, t.Key(c), t); err != nil {
+ return fmt.Errorf("putting Tag: %v", err)
+ }
+ }
+ // put the Commit
+ if err = putCommit(c, com); err != nil {
+ return err
+ }
+ if com.NeedsBenchmarking {
+ // add to CommitRun
+ cr, err := GetCommitRun(c, com.Num)
+ if err != nil {
+ return err
+ }
+ if err = cr.AddCommit(c, com); err != nil {
+ return err
+ }
+ // create PerfResult
+ res := &PerfResult{CommitHash: com.Hash, CommitNum: com.Num}
+ if _, err := datastore.Put(c, res.Key(c), res); err != nil {
+ return fmt.Errorf("putting PerfResult: %v", err)
+ }
+ // Update perf todo if necessary.
+ if err = AddCommitToPerfTodo(c, com); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// tagHandler records a new tag. It reads a JSON-encoded Tag value from the
+// request body and updates the Tag entity for the Kind of tag provided.
+//
+// This handler is used by a gobuilder process in -commit mode.
+func tagHandler(r *http.Request) (interface{}, error) {
+ if r.Method != "POST" {
+ return nil, errBadMethod(r.Method)
+ }
+
+ t := new(Tag)
+ defer r.Body.Close()
+ if err := json.NewDecoder(r.Body).Decode(t); err != nil {
+ return nil, err
+ }
+ if err := t.Valid(); err != nil {
+ return nil, err
+ }
+ c := contextForRequest(r)
+ defer cache.Tick(c)
+ _, err := datastore.Put(c, t.Key(c), t)
+ return nil, err
+}
+
+// Todo is a todoHandler response.
+type Todo struct {
+ Kind string // "build-go-commit" or "build-package"
+ Data interface{}
+}
+
+// todoHandler returns the next action to be performed by a builder.
+// It expects "builder" and "kind" query parameters and returns a *Todo value.
+// Multiple "kind" parameters may be specified.
+func todoHandler(r *http.Request) (interface{}, error) {
+ c := contextForRequest(r)
+ now := cache.Now(c)
+ key := "build-todo-" + r.Form.Encode()
+ var todo *Todo
+ if cache.Get(r, now, key, &todo) {
+ return todo, nil
+ }
+ var err error
+ builder := r.FormValue("builder")
+ for _, kind := range r.Form["kind"] {
+ var com *Commit
+ switch kind {
+ case "build-go-commit":
+ com, err = buildTodo(c, builder, "", "")
+ if com != nil {
+ com.PerfResults = []string{}
+ }
+ case "build-package":
+ packagePath := r.FormValue("packagePath")
+ goHash := r.FormValue("goHash")
+ com, err = buildTodo(c, builder, packagePath, goHash)
+ if com != nil {
+ com.PerfResults = []string{}
+ }
+ case "benchmark-go-commit":
+ com, err = perfTodo(c, builder)
+ }
+ if com != nil || err != nil {
+ if com != nil {
+ // ResultData can be large and not needed on builder.
+ com.ResultData = []string{}
+ }
+ todo = &Todo{Kind: kind, Data: com}
+ break
+ }
+ }
+ if err == nil {
+ cache.Set(r, now, key, todo)
+ }
+ return todo, err
+}
+
+// buildTodo returns the next Commit to be built (or nil if none available).
+//
+// If packagePath and goHash are empty, it scans the first 20 Go Commits in
+// Num-descending order and returns the first one it finds that doesn't have a
+// Result for this builder.
+//
+// If provided with non-empty packagePath and goHash args, it scans the first
+// 20 Commits in Num-descending order for the specified packagePath and
+// returns the first that doesn't have a Result for this builder and goHash.
+func buildTodo(c appengine.Context, builder, packagePath, goHash string) (*Commit, error) {
+ p, err := GetPackage(c, packagePath)
+ if err != nil {
+ return nil, err
+ }
+
+ t := datastore.NewQuery("Commit").
+ Ancestor(p.Key(c)).
+ Limit(commitsPerPage).
+ Order("-Num").
+ Run(c)
+ for {
+ com := new(Commit)
+ if _, err := t.Next(com); err == datastore.Done {
+ break
+ } else if err != nil {
+ return nil, err
+ }
+ if com.Result(builder, goHash) == nil {
+ return com, nil
+ }
+ }
+
+ // Nothing left to do if this is a package (not the Go tree).
+ if packagePath != "" {
+ return nil, nil
+ }
+
+ // If there are no Go tree commits left to build,
+ // see if there are any subrepo commits that need to be built at tip.
+ // If so, ask the builder to build a go tree at the tip commit.
+ // TODO(adg): do the same for "weekly" and "release" tags.
+
+ tag, err := GetTag(c, "tip")
+ if err != nil {
+ return nil, err
+ }
+
+ // Check that this Go commit builds OK for this builder.
+ // If not, don't re-build as the subrepos will never get built anyway.
+ com, err := tag.Commit(c)
+ if err != nil {
+ return nil, err
+ }
+ if r := com.Result(builder, ""); r != nil && !r.OK {
+ return nil, nil
+ }
+
+ pkgs, err := Packages(c, "subrepo")
+ if err != nil {
+ return nil, err
+ }
+ for _, pkg := range pkgs {
+ com, err := pkg.LastCommit(c)
+ if err != nil {
+ c.Warningf("%v: no Commit found: %v", pkg, err)
+ continue
+ }
+ if com.Result(builder, tag.Hash) == nil {
+ return tag.Commit(c)
+ }
+ }
+
+ return nil, nil
+}
+
+// perfTodo returns the next Commit to be benchmarked (or nil if none available).
+func perfTodo(c appengine.Context, builder string) (*Commit, error) {
+ p := &Package{}
+ todo := &PerfTodo{Builder: builder}
+ err := datastore.Get(c, todo.Key(c), todo)
+ if err != nil && err != datastore.ErrNoSuchEntity {
+ return nil, fmt.Errorf("fetching PerfTodo: %v", err)
+ }
+ if err == datastore.ErrNoSuchEntity {
+ todo, err = buildPerfTodo(c, builder)
+ if err != nil {
+ return nil, err
+ }
+ }
+ if len(todo.CommitNums) == 0 {
+ return nil, nil
+ }
+
+ // Have commit to benchmark, fetch it.
+ num := todo.CommitNums[len(todo.CommitNums)-1]
+ t := datastore.NewQuery("Commit").
+ Ancestor(p.Key(c)).
+ Filter("Num =", num).
+ Limit(1).
+ Run(c)
+ com := new(Commit)
+ if _, err := t.Next(com); err != nil {
+ return nil, err
+ }
+ if !com.NeedsBenchmarking {
+ return nil, fmt.Errorf("commit from perf todo queue is not intended for benchmarking")
+ }
+
+ // Remove benchmarks from other builders.
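+ // Each PerfResults entry is encoded as "builder|benchmark"; the
+ // "meta-done" entry marks that the builder has finished this commit.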
+ var benchs []string
+ for _, b := range com.PerfResults {
+ bb := strings.Split(b, "|")
+ if bb[0] == builder && bb[1] != "meta-done" {
+ benchs = append(benchs, bb[1])
+ }
+ }
+ com.PerfResults = benchs
+
+ return com, nil
+}
+
+// buildPerfTodo creates a PerfTodo for the builder containing all commits that
+// need benchmarking. It runs in a datastore transaction.
+func buildPerfTodo(c appengine.Context, builder string) (*PerfTodo, error) {
+ todo := &PerfTodo{Builder: builder}
+ tx := func(c appengine.Context) error {
+ err := datastore.Get(c, todo.Key(c), todo)
+ if err != nil && err != datastore.ErrNoSuchEntity {
+ return fmt.Errorf("fetching PerfTodo: %v", err)
+ }
+ if err == nil {
+ return nil
+ }
+ t := datastore.NewQuery("CommitRun").
+ Ancestor((&Package{}).Key(c)).
+ Order("-StartCommitNum").
+ Run(c)
+ var nums []int
+ var releaseNums []int
+ loop:
+ for {
+ cr := new(CommitRun)
+ if _, err := t.Next(cr); err == datastore.Done {
+ break
+ } else if err != nil {
+ return fmt.Errorf("scanning commit runs for perf todo: %v", err)
+ }
+ for i := len(cr.Hash) - 1; i >= 0; i-- {
+ if !cr.NeedsBenchmarking[i] || cr.Hash[i] == "" {
+ continue // There's nothing to see here. Move along.
+ }
+ num := cr.StartCommitNum + i
+ for k, v := range knownTags {
+ // Releases are benchmarked first, because they are important (and there are few of them).
+ if cr.Hash[i] == v {
+ releaseNums = append(releaseNums, num)
+ if k == "go1" {
+ break loop // Point of no benchmark: test/bench/shootout: update timing.log to Go 1.
+ }
+ }
+ }
+ nums = append(nums, num)
+ }
+ }
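+ // perfTodo consumes CommitNums from the end, so appending releaseNums
+ // after the ordered nums means releases are benchmarked first.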
+ todo.CommitNums = orderPerfTodo(nums)
+ todo.CommitNums = append(todo.CommitNums, releaseNums...)
+ if _, err = datastore.Put(c, todo.Key(c), todo); err != nil {
+ return fmt.Errorf("putting PerfTodo: %v", err)
+ }
+ return nil
+ }
+ return todo, datastore.RunInTransaction(c, tx, nil)
+}
+
+func removeCommitFromPerfTodo(c appengine.Context, builder string, num int) error {
+ todo := &PerfTodo{Builder: builder}
+ err := datastore.Get(c, todo.Key(c), todo)
+ if err != nil && err != datastore.ErrNoSuchEntity {
+ return fmt.Errorf("fetching PerfTodo: %v", err)
+ }
+ if err == datastore.ErrNoSuchEntity {
+ return nil
+ }
+ for i := len(todo.CommitNums) - 1; i >= 0; i-- {
+ if todo.CommitNums[i] == num {
+ for ; i < len(todo.CommitNums)-1; i++ {
+ todo.CommitNums[i] = todo.CommitNums[i+1]
+ }
+ todo.CommitNums = todo.CommitNums[:i]
+ _, err = datastore.Put(c, todo.Key(c), todo)
+ if err != nil {
+ return fmt.Errorf("putting PerfTodo: %v", err)
+ }
+ break
+ }
+ }
+ return nil
+}
+
+// packagesHandler returns a list of the non-Go Packages monitored
+// by the dashboard.
+func packagesHandler(r *http.Request) (interface{}, error) {
+ kind := r.FormValue("kind")
+ c := contextForRequest(r)
+ now := cache.Now(c)
+ key := "build-packages-" + kind
+ var p []*Package
+ if cache.Get(r, now, key, &p) {
+ return p, nil
+ }
+ p, err := Packages(c, kind)
+ if err != nil {
+ return nil, err
+ }
+ cache.Set(r, now, key, p)
+ return p, nil
+}
+
+// resultHandler records a build result.
+// It reads a JSON-encoded Result value from the request body,
+// creates a new Result entity, and updates the relevant Commit entity.
+// If the Log field is not empty, resultHandler creates a new Log entity
+// and updates the LogHash field before putting the Commit entity.
+func resultHandler(r *http.Request) (interface{}, error) {
+ if r.Method != "POST" {
+ return nil, errBadMethod(r.Method)
+ }
+
+ c := contextForRequest(r)
+ res := new(Result)
+ defer r.Body.Close()
+ if err := json.NewDecoder(r.Body).Decode(res); err != nil {
+ return nil, fmt.Errorf("decoding Body: %v", err)
+ }
+ if err := res.Valid(); err != nil {
+ return nil, fmt.Errorf("validating Result: %v", err)
+ }
+ defer cache.Tick(c)
+ // store the Log text if supplied
+ if len(res.Log) > 0 {
+ hash, err := PutLog(c, res.Log)
+ if err != nil {
+ return nil, fmt.Errorf("putting Log: %v", err)
+ }
+ res.LogHash = hash
+ }
+ tx := func(c appengine.Context) error {
+ // check Package exists
+ if _, err := GetPackage(c, res.PackagePath); err != nil {
+ return fmt.Errorf("GetPackage: %v", err)
+ }
+ // put Result
+ if _, err := datastore.Put(c, res.Key(c), res); err != nil {
+ return fmt.Errorf("putting Result: %v", err)
+ }
+ // add Result to Commit
+ com := &Commit{PackagePath: res.PackagePath, Hash: res.Hash}
+ if err := com.AddResult(c, res); err != nil {
+ return fmt.Errorf("AddResult: %v", err)
+ }
+ // Send build failure notifications, if necessary.
+ // Note this must run after the call to AddResult, which
+ // populates the Commit's ResultData field.
+ return notifyOnFailure(c, com, res.Builder)
+ }
+ return nil, datastore.RunInTransaction(c, tx, nil)
+}
+
+// perf-result request payload
+type PerfRequest struct {
+ Builder string
+ Benchmark string
+ Hash string
+ OK bool
+ Metrics []PerfMetric
+ Artifacts []PerfArtifact
+}
+
+type PerfMetric struct {
+ Type string
+ Val uint64
+}
+
+type PerfArtifact struct {
+ Type string
+ Body string
+}
+
+// perfResultHandler records a benchmarking result.
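+//
+// A request body might look like (values illustrative):
+//
+//  {
+//    "Builder": "linux-amd64",
+//    "Benchmark": "json-1",
+//    "Hash": "<commit hash>",
+//    "OK": true,
+//    "Metrics": [{"Type": "time", "Val": 123456789}],
+//    "Artifacts": [{"Type": "log", "Body": "<benchmark output>"}]
+//  }
+//
+// Benchmark names carry a GOMAXPROCS suffix ("json-1", "json-4"), and a final
+// request with Benchmark "meta-done" marks the commit as finished for this builder.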
+func perfResultHandler(r *http.Request) (interface{}, error) {
+ defer r.Body.Close()
+ if r.Method != "POST" {
+ return nil, errBadMethod(r.Method)
+ }
+
+ req := new(PerfRequest)
+ if err := json.NewDecoder(r.Body).Decode(req); err != nil {
+ return nil, fmt.Errorf("decoding Body: %v", err)
+ }
+
+ c := contextForRequest(r)
+ defer cache.Tick(c)
+
+ // store the text files if supplied
+ for i, a := range req.Artifacts {
+ hash, err := PutLog(c, a.Body)
+ if err != nil {
+ return nil, fmt.Errorf("putting Log: %v", err)
+ }
+ req.Artifacts[i].Body = hash
+ }
+ tx := func(c appengine.Context) error {
+ return addPerfResult(c, r, req)
+ }
+ return nil, datastore.RunInTransaction(c, tx, nil)
+}
+
+// addPerfResult creates PerfResult and updates Commit, PerfTodo,
+// PerfMetricRun and PerfConfig.
+// MUST be called from inside a transaction.
+func addPerfResult(c appengine.Context, r *http.Request, req *PerfRequest) error {
+ // check Package exists
+ p, err := GetPackage(c, "")
+ if err != nil {
+ return fmt.Errorf("GetPackage: %v", err)
+ }
+ // add result to Commit
+ com := &Commit{Hash: req.Hash}
+ if err := com.AddPerfResult(c, req.Builder, req.Benchmark); err != nil {
+ return fmt.Errorf("AddPerfResult: %v", err)
+ }
+
+ // add the result to PerfResult
+ res := &PerfResult{CommitHash: req.Hash}
+ if err := datastore.Get(c, res.Key(c), res); err != nil {
+ return fmt.Errorf("getting PerfResult: %v", err)
+ }
+ present := res.AddResult(req)
+ if _, err := datastore.Put(c, res.Key(c), res); err != nil {
+ return fmt.Errorf("putting PerfResult: %v", err)
+ }
+
+ // Meta-done denotes that there are no benchmarks left.
+ if req.Benchmark == "meta-done" {
+ // Don't send duplicate emails for the same commit/builder.
+ // And don't send emails about too old commits.
+ if !present && com.Num >= p.NextNum-commitsPerPage {
+ if err := checkPerfChanges(c, r, com, req.Builder, res); err != nil {
+ return err
+ }
+ }
+ if err := removeCommitFromPerfTodo(c, req.Builder, com.Num); err != nil {
+ return err
+ }
+ return nil
+ }
+
+ // update PerfConfig
+ newBenchmark, err := UpdatePerfConfig(c, r, req)
+ if err != nil {
+ return fmt.Errorf("updating PerfConfig: %v", err)
+ }
+ if newBenchmark {
+ // If this is a new benchmark on the builder, delete PerfTodo.
+ // It will be recreated later with all commits again.
+ todo := &PerfTodo{Builder: req.Builder}
+ err = datastore.Delete(c, todo.Key(c))
+ if err != nil && err != datastore.ErrNoSuchEntity {
+ return fmt.Errorf("deleting PerfTodo: %v", err)
+ }
+ }
+
+ // add perf metrics
+ for _, metric := range req.Metrics {
+ m, err := GetPerfMetricRun(c, req.Builder, req.Benchmark, metric.Type, com.Num)
+ if err != nil {
+ return fmt.Errorf("GetPerfMetrics: %v", err)
+ }
+ if err = m.AddMetric(c, com.Num, metric.Val); err != nil {
+ return fmt.Errorf("AddMetric: %v", err)
+ }
+ }
+
+ return nil
+}
+
+// MUST be called from inside a transaction.
+func checkPerfChanges(c appengine.Context, r *http.Request, com *Commit, builder string, res *PerfResult) error {
+ pc, err := GetPerfConfig(c, r)
+ if err != nil {
+ return err
+ }
+
+ results := res.ParseData()[builder]
+ rcNewer := MakePerfResultCache(c, com, true)
+ rcOlder := MakePerfResultCache(c, com, false)
+
+ // Check whether we need to send failure notification email.
+ if results["meta-done"].OK {
+ // This one is successful, see if the next is failed.
+ nextRes, err := rcNewer.Next(com.Num)
+ if err != nil {
+ return err
+ }
+ if nextRes != nil && isPerfFailed(nextRes, builder) {
+ sendPerfFailMail(c, builder, nextRes)
+ }
+ } else {
+ // This one is failed, see if the previous is successful.
+ prevRes, err := rcOlder.Next(com.Num)
+ if err != nil {
+ return err
+ }
+ if prevRes != nil && !isPerfFailed(prevRes, builder) {
+ sendPerfFailMail(c, builder, res)
+ }
+ }
+
+ // Now see if there are any performance changes.
+ // Find the previous and the next results for performance comparison.
+ prevRes, err := rcOlder.NextForComparison(com.Num, builder)
+ if err != nil {
+ return err
+ }
+ nextRes, err := rcNewer.NextForComparison(com.Num, builder)
+ if err != nil {
+ return err
+ }
+ if results["meta-done"].OK {
+ // This one is successful, compare with a previous one.
+ if prevRes != nil {
+ if err := comparePerfResults(c, pc, builder, prevRes, res); err != nil {
+ return err
+ }
+ }
+ // Compare a next one with the current.
+ if nextRes != nil {
+ if err := comparePerfResults(c, pc, builder, res, nextRes); err != nil {
+ return err
+ }
+ }
+ } else {
+ // This one is failed, compare a previous one with a next one.
+ if prevRes != nil && nextRes != nil {
+ if err := comparePerfResults(c, pc, builder, prevRes, nextRes); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+func comparePerfResults(c appengine.Context, pc *PerfConfig, builder string, prevRes, res *PerfResult) error {
+ changes := significantPerfChanges(pc, builder, prevRes, res)
+ if len(changes) == 0 {
+ return nil
+ }
+ com := &Commit{Hash: res.CommitHash}
+ if err := datastore.Get(c, com.Key(c), com); err != nil {
+ return fmt.Errorf("getting commit %v: %v", com.Hash, err)
+ }
+ sendPerfMailLater.Call(c, com, prevRes.CommitHash, builder, changes) // add task to queue
+ return nil
+}
+
+// logHandler displays log text for a given hash.
+// It handles paths like "/log/hash".
+func logHandler(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("Content-type", "text/plain; charset=utf-8")
+ c := contextForRequest(r)
+ hash := r.URL.Path[strings.LastIndex(r.URL.Path, "/")+1:]
+ key := datastore.NewKey(c, "Log", hash, 0, nil)
+ l := new(Log)
+ if err := datastore.Get(c, key, l); err != nil {
+ logErr(w, r, err)
+ return
+ }
+ b, err := l.Text()
+ if err != nil {
+ logErr(w, r, err)
+ return
+ }
+ w.Write(b)
+}
+
+type dashHandler func(*http.Request) (interface{}, error)
+
+type dashResponse struct {
+ Response interface{}
+ Error string
+}
+
+// errBadMethod is returned by a dashHandler when
+// the request has an unsuitable method.
+type errBadMethod string
+
+func (e errBadMethod) Error() string {
+ return "bad method: " + string(e)
+}
+
+// AuthHandler wraps a http.HandlerFunc with a handler that validates the
+// supplied key and builder query parameters.
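+//
+// Responses are written as a JSON-encoded dashResponse:
+//
+//  {"Response": <handler result or null>, "Error": "<message, empty on success>"}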
+func AuthHandler(h dashHandler) http.HandlerFunc {
+ return func(w http.ResponseWriter, r *http.Request) {
+ c := contextForRequest(r)
+
+ // Put the URL Query values into r.Form to avoid parsing the
+ // request body when calling r.FormValue.
+ r.Form = r.URL.Query()
+
+ var err error
+ var resp interface{}
+
+ // Validate key query parameter for POST requests only.
+ key := r.FormValue("key")
+ builder := r.FormValue("builder")
+ if r.Method == "POST" && !validKey(c, key, builder) {
+ err = fmt.Errorf("invalid key %q for builder %q", key, builder)
+ }
+
+ // Call the original HandlerFunc and return the response.
+ if err == nil {
+ resp, err = h(r)
+ }
+
+ // Write JSON response.
+ dashResp := &dashResponse{Response: resp}
+ if err != nil {
+ c.Errorf("%v", err)
+ dashResp.Error = err.Error()
+ }
+ w.Header().Set("Content-Type", "application/json")
+ if err = json.NewEncoder(w).Encode(dashResp); err != nil {
+ c.Criticalf("encoding response: %v", err)
+ }
+ }
+}
+
+func keyHandler(w http.ResponseWriter, r *http.Request) {
+ builder := r.FormValue("builder")
+ if builder == "" {
+ logErr(w, r, errors.New("must supply builder in query string"))
+ return
+ }
+ c := contextForRequest(r)
+ fmt.Fprint(w, builderKey(c, builder))
+}
+
+func init() {
+ for _, d := range dashboards {
+ // admin handlers
+ http.HandleFunc(d.RelPath+"init", initHandler)
+ http.HandleFunc(d.RelPath+"key", keyHandler)
+
+ // authenticated handlers
+ http.HandleFunc(d.RelPath+"commit", AuthHandler(commitHandler))
+ http.HandleFunc(d.RelPath+"packages", AuthHandler(packagesHandler))
+ http.HandleFunc(d.RelPath+"result", AuthHandler(resultHandler))
+ http.HandleFunc(d.RelPath+"perf-result", AuthHandler(perfResultHandler))
+ http.HandleFunc(d.RelPath+"tag", AuthHandler(tagHandler))
+ http.HandleFunc(d.RelPath+"todo", AuthHandler(todoHandler))
+
+ // public handlers
+ http.HandleFunc(d.RelPath+"log/", logHandler)
+ }
+}
+
+func validHash(hash string) bool {
+ // TODO(adg): correctly validate a hash
+ return hash != ""
+}
+
+func validKey(c appengine.Context, key, builder string) bool {
+ return isMasterKey(c, key) || key == builderKey(c, builder)
+}
+
+func isMasterKey(c appengine.Context, k string) bool {
+ return appengine.IsDevAppServer() || k == key.Secret(c)
+}
+
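+// builderKey derives a per-builder key as the hex HMAC-MD5 of the builder
+// name, keyed with the dashboard secret. validKey accepts either this derived
+// key or the master secret itself (and any key on the dev app server).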
+func builderKey(c appengine.Context, builder string) string {
+ h := hmac.New(md5.New, []byte(key.Secret(c)))
+ h.Write([]byte(builder))
+ return fmt.Sprintf("%x", h.Sum(nil))
+}
+
+func logErr(w http.ResponseWriter, r *http.Request, err error) {
+ contextForRequest(r).Errorf("Error: %v", err)
+ w.WriteHeader(http.StatusInternalServerError)
+ fmt.Fprint(w, "Error: ", err)
+}
+
+func contextForRequest(r *http.Request) appengine.Context {
+ return dashboardForRequest(r).Context(appengine.NewContext(r))
+}
+
+// limitStringLength essentially does return s[:max],
+// but it ensures that we do not split a UTF-8 rune in half.
+// Otherwise appengine python scripts will break badly.
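+//
+// For example, limitStringLength("héllo", 2) returns "h" rather than the
+// first byte of the two-byte 'é'.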
+func limitStringLength(s string, max int) string {
+ if len(s) <= max {
+ return s
+ }
+ for {
+ s = s[:max]
+ r, size := utf8.DecodeLastRuneInString(s)
+ if r != utf8.RuneError || size != 1 {
+ return s
+ }
+ max--
+ }
+}
diff --git a/llgo/third_party/go.tools/dashboard/app/build/init.go b/llgo/third_party/go.tools/dashboard/app/build/init.go
new file mode 100644
index 0000000000000000000000000000000000000000..e7d63ed5f9967d04e93422161d34224190c11f5a
--- /dev/null
+++ b/llgo/third_party/go.tools/dashboard/app/build/init.go
@@ -0,0 +1,46 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build appengine
+
+package build
+
+import (
+ "fmt"
+ "net/http"
+
+ "appengine"
+ "appengine/datastore"
+
+ "cache"
+ "key"
+)
+
+func initHandler(w http.ResponseWriter, r *http.Request) {
+ d := dashboardForRequest(r)
+ c := d.Context(appengine.NewContext(r))
+ defer cache.Tick(c)
+ for _, p := range d.Packages {
+ err := datastore.Get(c, p.Key(c), new(Package))
+ if _, ok := err.(*datastore.ErrFieldMismatch); ok {
+ // Some fields have been removed, so it's okay to ignore this error.
+ err = nil
+ }
+ if err == nil {
+ continue
+ } else if err != datastore.ErrNoSuchEntity {
+ logErr(w, r, err)
+ return
+ }
+ if _, err := datastore.Put(c, p.Key(c), p); err != nil {
+ logErr(w, r, err)
+ return
+ }
+ }
+
+ // Create secret key.
+ key.Secret(c)
+
+ fmt.Fprint(w, "OK")
+}
diff --git a/llgo/third_party/go.tools/dashboard/app/build/notify.go b/llgo/third_party/go.tools/dashboard/app/build/notify.go
new file mode 100644
index 0000000000000000000000000000000000000000..1a71dd234b10000d4aaa0b055aaf3032dd98b2cc
--- /dev/null
+++ b/llgo/third_party/go.tools/dashboard/app/build/notify.go
@@ -0,0 +1,378 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build appengine
+
+package build
+
+import (
+ "bytes"
+ "encoding/gob"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "regexp"
+ "runtime"
+ "sort"
+ "text/template"
+
+ "appengine"
+ "appengine/datastore"
+ "appengine/delay"
+ "appengine/mail"
+ "appengine/urlfetch"
+)
+
+const (
+ mailFrom = "builder@golang.org" // use this for sending any mail
+ failMailTo = "golang-dev@googlegroups.com"
+ domain = "build.golang.org"
+ gobotBase = "http://research.swtch.com/gobot_codereview"
+)
+
+// ignoreFailure is a set of builders that we don't email about because
+// they are not yet production-ready.
+var ignoreFailure = map[string]bool{
+ "dragonfly-386": true,
+ "dragonfly-amd64": true,
+ "freebsd-arm": true,
+ "netbsd-amd64-bsiegert": true,
+ "netbsd-arm-rpi": true,
+ "plan9-amd64-aram": true,
+}
+
+// notifyOnFailure checks whether the supplied Commit or the subsequent
+// Commit (if present) breaks the build for this builder.
+// If either of those commits breaks the build, an email notification is sent
+// from a delayed task. (We use a task because this way the mail won't be
+// sent if the enclosing datastore transaction fails.)
+//
+// This must be run in a datastore transaction, and the provided *Commit must
+// have been retrieved from the datastore within that transaction.
+func notifyOnFailure(c appengine.Context, com *Commit, builder string) error {
+ if ignoreFailure[builder] {
+ return nil
+ }
+
+ // TODO(adg): implement notifications for packages
+ if com.PackagePath != "" {
+ return nil
+ }
+
+ p := &Package{Path: com.PackagePath}
+ var broken *Commit
+ cr := com.Result(builder, "")
+ if cr == nil {
+ return fmt.Errorf("no result for %s/%s", com.Hash, builder)
+ }
+ q := datastore.NewQuery("Commit").Ancestor(p.Key(c))
+ if cr.OK {
+ // This commit is OK. Notify if next Commit is broken.
+ next := new(Commit)
+ q = q.Filter("ParentHash=", com.Hash)
+ if err := firstMatch(c, q, next); err != nil {
+ if err == datastore.ErrNoSuchEntity {
+ // OK at tip, no notification necessary.
+ return nil
+ }
+ return err
+ }
+ if nr := next.Result(builder, ""); nr != nil && !nr.OK {
+ c.Debugf("commit ok: %#v\nresult: %#v", com, cr)
+ c.Debugf("next commit broken: %#v\nnext result:%#v", next, nr)
+ broken = next
+ }
+ } else {
+ // This commit is broken. Notify if the previous Commit is OK.
+ prev := new(Commit)
+ q = q.Filter("Hash=", com.ParentHash)
+ if err := firstMatch(c, q, prev); err != nil {
+ if err == datastore.ErrNoSuchEntity {
+ // No previous result, let the backfill of
+ // this result trigger the notification.
+ return nil
+ }
+ return err
+ }
+ if pr := prev.Result(builder, ""); pr != nil && pr.OK {
+ c.Debugf("commit broken: %#v\nresult: %#v", com, cr)
+ c.Debugf("previous commit ok: %#v\nprevious result:%#v", prev, pr)
+ broken = com
+ }
+ }
+ if broken == nil {
+ return nil
+ }
+ r := broken.Result(builder, "")
+ if r == nil {
+ return fmt.Errorf("finding result for %q: %+v", builder, com)
+ }
+ return commonNotify(c, broken, builder, r.LogHash)
+}
+
+// firstMatch executes the query q and loads the first entity into v.
+func firstMatch(c appengine.Context, q *datastore.Query, v interface{}) error {
+ t := q.Limit(1).Run(c)
+ _, err := t.Next(v)
+ if err == datastore.Done {
+ err = datastore.ErrNoSuchEntity
+ }
+ return err
+}
+
+var notifyLater = delay.Func("notify", notify)
+
+// notify tries to update the CL for the given Commit with a failure message.
+// If it doesn't succeed, it sends a failure email to golang-dev.
+func notify(c appengine.Context, com *Commit, builder, logHash string) {
+ v := url.Values{"brokebuild": {builder}, "log": {logHash}}
+ if !updateCL(c, com, v) {
+ // Send a mail notification if the CL can't be found.
+ sendFailMail(c, com, builder, logHash)
+ }
+}
+
+// updateCL tells gobot to update the CL for the given Commit with
+// the provided query values.
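+//
+// The request is a POST to
+//
+//  http://research.swtch.com/gobot_codereview?cl=<CL>&<query values>
+//
+// where the query values are supplied by the caller (brokebuild and log from
+// notify, or textmsg from sendPerfMailFunc).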
+func updateCL(c appengine.Context, com *Commit, v url.Values) bool {
+ cl, err := lookupCL(c, com)
+ if err != nil {
+ c.Errorf("could not find CL for %v: %v", com.Hash, err)
+ return false
+ }
+ u := fmt.Sprintf("%v?cl=%v&%s", gobotBase, cl, v.Encode())
+ r, err := urlfetch.Client(c).Post(u, "text/plain", nil)
+ if err != nil {
+ c.Errorf("could not update CL %v: %v", cl, err)
+ return false
+ }
+ r.Body.Close()
+ if r.StatusCode != http.StatusOK {
+ c.Errorf("could not update CL %v: %v", cl, r.Status)
+ return false
+ }
+ return true
+}
+
+var clURL = regexp.MustCompile(`https://codereview.appspot.com/([0-9]+)`)
+
+// lookupCL consults code.google.com for the full change description for the
+// provided Commit, and returns the relevant CL number.
+func lookupCL(c appengine.Context, com *Commit) (string, error) {
+ url := "https://code.google.com/p/go/source/detail?r=" + com.Hash
+ r, err := urlfetch.Client(c).Get(url)
+ if err != nil {
+ return "", err
+ }
+ defer r.Body.Close()
+ if r.StatusCode != http.StatusOK {
+ return "", fmt.Errorf("retrieving %v: %v", url, r.Status)
+ }
+ b, err := ioutil.ReadAll(r.Body)
+ if err != nil {
+ return "", err
+ }
+ m := clURL.FindAllSubmatch(b, -1)
+ if m == nil {
+ return "", errors.New("no CL URL found on changeset page")
+ }
+ // Return the last visible codereview URL on the page,
+ // in case the change description refers to another CL.
+ return string(m[len(m)-1][1]), nil
+}
+
+var sendFailMailTmpl = template.Must(template.New("notify.txt").
+ Funcs(template.FuncMap(tmplFuncs)).
+ ParseFiles("build/notify.txt"))
+
+func init() {
+ gob.Register(&Commit{}) // for delay
+}
+
+var (
+ sendPerfMailLater = delay.Func("sendPerfMail", sendPerfMailFunc)
+ sendPerfMailTmpl = template.Must(
+ template.New("perf_notify.txt").
+ Funcs(template.FuncMap(tmplFuncs)).
+ ParseFiles("build/perf_notify.txt"),
+ )
+)
+
+// MUST be called from inside a transaction.
+func sendPerfFailMail(c appengine.Context, builder string, res *PerfResult) error {
+ com := &Commit{Hash: res.CommitHash}
+ if err := datastore.Get(c, com.Key(c), com); err != nil {
+ return err
+ }
+ logHash := ""
+ parsed := res.ParseData()
+ for _, data := range parsed[builder] {
+ if !data.OK {
+ logHash = data.Artifacts["log"]
+ break
+ }
+ }
+ if logHash == "" {
+ return fmt.Errorf("can not find failed result for commit %v on builder %v", com.Hash, builder)
+ }
+ return commonNotify(c, com, builder, logHash)
+}
+
+// commonNotify MUST!!! be called from within a transaction inside which
+// the provided Commit entity was retrieved from the datastore.
+func commonNotify(c appengine.Context, com *Commit, builder, logHash string) error {
+ if com.Num == 0 || com.Desc == "" {
+ stk := make([]byte, 10000)
+ n := runtime.Stack(stk, false)
+ stk = stk[:n]
+ c.Errorf("refusing to notify with com=%+v\n%s", *com, string(stk))
+ return fmt.Errorf("misuse of commonNotify")
+ }
+ if com.FailNotificationSent {
+ return nil
+ }
+ c.Infof("%s is broken commit; notifying", com.Hash)
+ notifyLater.Call(c, com, builder, logHash) // add task to queue
+ com.FailNotificationSent = true
+ return putCommit(c, com)
+}
+
+// sendFailMail sends a mail notification that the build failed on the
+// provided commit and builder.
+func sendFailMail(c appengine.Context, com *Commit, builder, logHash string) {
+ // get Log
+ k := datastore.NewKey(c, "Log", logHash, 0, nil)
+ l := new(Log)
+ if err := datastore.Get(c, k, l); err != nil {
+ c.Errorf("finding Log record %v: %v", logHash, err)
+ return
+ }
+ logText, err := l.Text()
+ if err != nil {
+ c.Errorf("unpacking Log record %v: %v", logHash, err)
+ return
+ }
+
+ // prepare mail message
+ var body bytes.Buffer
+ err = sendFailMailTmpl.Execute(&body, map[string]interface{}{
+ "Builder": builder, "Commit": com, "LogHash": logHash, "LogText": logText,
+ "Hostname": domain,
+ })
+ if err != nil {
+ c.Errorf("rendering mail template: %v", err)
+ return
+ }
+ subject := fmt.Sprintf("%s broken by %s", builder, shortDesc(com.Desc))
+ msg := &mail.Message{
+ Sender: mailFrom,
+ To: []string{failMailTo},
+ ReplyTo: failMailTo,
+ Subject: subject,
+ Body: body.String(),
+ }
+
+ // send mail
+ if err := mail.Send(c, msg); err != nil {
+ c.Errorf("sending mail: %v", err)
+ }
+}
+
+type PerfChangeBenchmark struct {
+ Name string
+ Metrics []*PerfChangeMetric
+}
+
+type PerfChangeMetric struct {
+ Name string
+ Old uint64
+ New uint64
+ Delta float64
+}
+
+type PerfChangeBenchmarkSlice []*PerfChangeBenchmark
+
+func (l PerfChangeBenchmarkSlice) Len() int { return len(l) }
+func (l PerfChangeBenchmarkSlice) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
+func (l PerfChangeBenchmarkSlice) Less(i, j int) bool {
+ b1, p1 := splitBench(l[i].Name)
+ b2, p2 := splitBench(l[j].Name)
+ if b1 != b2 {
+ return b1 < b2
+ }
+ return p1 < p2
+}
+
+type PerfChangeMetricSlice []*PerfChangeMetric
+
+func (l PerfChangeMetricSlice) Len() int { return len(l) }
+func (l PerfChangeMetricSlice) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
+func (l PerfChangeMetricSlice) Less(i, j int) bool { return l[i].Name < l[j].Name }
+
+func sendPerfMailFunc(c appengine.Context, com *Commit, prevCommitHash, builder string, changes []*PerfChange) {
+ // Sort the changes into the right order.
+ var benchmarks []*PerfChangeBenchmark
+ for _, ch := range changes {
+ // Find the benchmark.
+ var b *PerfChangeBenchmark
+ for _, b1 := range benchmarks {
+ if b1.Name == ch.Bench {
+ b = b1
+ break
+ }
+ }
+ if b == nil {
+ b = &PerfChangeBenchmark{Name: ch.Bench}
+ benchmarks = append(benchmarks, b)
+ }
+ b.Metrics = append(b.Metrics, &PerfChangeMetric{Name: ch.Metric, Old: ch.Old, New: ch.New, Delta: ch.Diff})
+ }
+ for _, b := range benchmarks {
+ sort.Sort(PerfChangeMetricSlice(b.Metrics))
+ }
+ sort.Sort(PerfChangeBenchmarkSlice(benchmarks))
+
+ u := fmt.Sprintf("http://%v/perfdetail?commit=%v&commit0=%v&kind=builder&builder=%v", domain, com.Hash, prevCommitHash, builder)
+
+ // Prepare mail message (without Commit, for updateCL).
+ var body bytes.Buffer
+ err := sendPerfMailTmpl.Execute(&body, map[string]interface{}{
+ "Builder": builder, "Hostname": domain, "Url": u, "Benchmarks": benchmarks,
+ })
+ if err != nil {
+ c.Errorf("rendering perf mail template: %v", err)
+ return
+ }
+
+ // First, try to update the CL.
+ v := url.Values{"textmsg": {body.String()}}
+ if updateCL(c, com, v) {
+ return
+ }
+
+ // Otherwise, send mail (with Commit, for independent mail message).
+ body.Reset()
+ err = sendPerfMailTmpl.Execute(&body, map[string]interface{}{
+ "Builder": builder, "Commit": com, "Hostname": domain, "Url": u, "Benchmarks": benchmarks,
+ })
+ if err != nil {
+ c.Errorf("rendering perf mail template: %v", err)
+ return
+ }
+ subject := fmt.Sprintf("Perf changes on %s by %s", builder, shortDesc(com.Desc))
+ msg := &mail.Message{
+ Sender: mailFrom,
+ To: []string{failMailTo},
+ ReplyTo: failMailTo,
+ Subject: subject,
+ Body: body.String(),
+ }
+
+ // send mail
+ if err := mail.Send(c, msg); err != nil {
+ c.Errorf("sending mail: %v", err)
+ }
+}
diff --git a/llgo/third_party/go.tools/dashboard/app/build/notify.txt b/llgo/third_party/go.tools/dashboard/app/build/notify.txt
new file mode 100644
index 0000000000000000000000000000000000000000..514191f5ca200e25c385d810a65ec04ab961e5f0
--- /dev/null
+++ b/llgo/third_party/go.tools/dashboard/app/build/notify.txt
@@ -0,0 +1,9 @@
+Change {{shortHash .Commit.Hash}} broke the {{.Builder}} build:
+http://{{.Hostname}}/log/{{.LogHash}}
+
+{{.Commit.Desc}}
+
+http://code.google.com/p/go/source/detail?r={{shortHash .Commit.Hash}}
+
+$ tail -200 < log
+{{printf "%s" .LogText | tail 200}}
diff --git a/llgo/third_party/go.tools/dashboard/app/build/perf.go b/llgo/third_party/go.tools/dashboard/app/build/perf.go
new file mode 100644
index 0000000000000000000000000000000000000000..2c16e60407036f4505bd19220b47d286aadc7b41
--- /dev/null
+++ b/llgo/third_party/go.tools/dashboard/app/build/perf.go
@@ -0,0 +1,312 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build appengine
+
+package build
+
+import (
+ "fmt"
+ "sort"
+ "strconv"
+ "strings"
+
+ "appengine"
+ "appengine/datastore"
+)
+
+var knownTags = map[string]string{
+ "go1": "0051c7442fed9c888de6617fa9239a913904d96e",
+ "go1.1": "d29da2ced72ba2cf48ed6a8f1ec4abc01e4c5bf1",
+ "go1.2": "b1edf8faa5d6cbc50c6515785df9df9c19296564",
+ "go1.3": "f153208c0a0e306bfca14f71ef11f09859ccabc8",
+}
+
+var lastRelease = "go1.3"
+
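+// splitBench splits a combined "benchmark-procs" name into its parts;
+// for example, splitBench("json-4") returns ("json", 4).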
+func splitBench(benchProcs string) (string, int) {
+ ss := strings.Split(benchProcs, "-")
+ procs, _ := strconv.Atoi(ss[1])
+ return ss[0], procs
+}
+
+func dashPerfCommits(c appengine.Context, page int) ([]*Commit, error) {
+ q := datastore.NewQuery("Commit").
+ Ancestor((&Package{}).Key(c)).
+ Order("-Num").
+ Filter("NeedsBenchmarking =", true).
+ Limit(commitsPerPage).
+ Offset(page * commitsPerPage)
+ var commits []*Commit
+ _, err := q.GetAll(c, &commits)
+ if err == nil && len(commits) == 0 {
+ err = fmt.Errorf("no commits")
+ }
+ return commits, err
+}
+
+func perfChangeStyle(pc *PerfConfig, v float64, builder, benchmark, metric string) string {
+ noise := pc.NoiseLevel(builder, benchmark, metric)
+ if isNoise(v, noise) {
+ return "noise"
+ }
+ if v > 0 {
+ return "bad"
+ }
+ return "good"
+}
+
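+// isNoise reports whether a percentage diff lies within the noise level.
+// The lower bound is the inverse percentage: with noise 5, diffs in
+// roughly (-4.76, 5) are treated as noise.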
+func isNoise(diff, noise float64) bool {
+ rnoise := -100 * noise / (noise + 100)
+ return diff < noise && diff > rnoise
+}
+
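+// perfDiff returns the percentage change from old to new;
+// for example, perfDiff(100, 110) == 10.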
+func perfDiff(old, new uint64) float64 {
+ return 100*float64(new)/float64(old) - 100
+}
+
+func isPerfFailed(res *PerfResult, builder string) bool {
+ data := res.ParseData()[builder]
+ return data != nil && data["meta-done"] != nil && !data["meta-done"].OK
+}
+
+// PerfResultCache caches a set of PerfResults so that it's easy to access them
+// without lots of duplicate accesses to datastore.
+// It allows iterating over newer or older results for some base commit.
+type PerfResultCache struct {
+ c appengine.Context
+ newer bool
+ iter *datastore.Iterator
+ results map[int]*PerfResult
+}
+
+func MakePerfResultCache(c appengine.Context, com *Commit, newer bool) *PerfResultCache {
+ p := &Package{}
+ q := datastore.NewQuery("PerfResult").Ancestor(p.Key(c)).Limit(100)
+ if newer {
+ q = q.Filter("CommitNum >=", com.Num).Order("CommitNum")
+ } else {
+ q = q.Filter("CommitNum <=", com.Num).Order("-CommitNum")
+ }
+ rc := &PerfResultCache{c: c, newer: newer, iter: q.Run(c), results: make(map[int]*PerfResult)}
+ return rc
+}
+
+func (rc *PerfResultCache) Get(commitNum int) *PerfResult {
+ rc.Next(commitNum) // fetch the commit, if necessary
+ return rc.results[commitNum]
+}
+
+// Next returns the next PerfResult at or beyond commitNum, in the direction
+// (newer or older) that the cache was created with.
+// It does not care whether the result has any data or has failed.
+func (rc *PerfResultCache) Next(commitNum int) (*PerfResult, error) {
+ // See if we have next result in the cache.
+ next := -1
+ for ci := range rc.results {
+ if rc.newer {
+ if ci > commitNum && (next == -1 || ci < next) {
+ next = ci
+ }
+ } else {
+ if ci < commitNum && (next == -1 || ci > next) {
+ next = ci
+ }
+ }
+ }
+ if next != -1 {
+ return rc.results[next], nil
+ }
+ // Fetch next result from datastore.
+ res := new(PerfResult)
+ _, err := rc.iter.Next(res)
+ if err == datastore.Done {
+ return nil, nil
+ }
+ if err != nil {
+ return nil, fmt.Errorf("fetching perf results: %v", err)
+ }
+ if (rc.newer && res.CommitNum < commitNum) || (!rc.newer && res.CommitNum > commitNum) {
+ rc.c.Errorf("PerfResultCache.Next: bad commit num")
+ }
+ rc.results[res.CommitNum] = res
+ return res, nil
+}
+
+// NextForComparison returns the PerfResult to use for performance comparison.
+// It skips failed results, but does not skip results with no data.
+func (rc *PerfResultCache) NextForComparison(commitNum int, builder string) (*PerfResult, error) {
+ for {
+ res, err := rc.Next(commitNum)
+ if err != nil {
+ return nil, err
+ }
+ if res == nil {
+ return nil, nil
+ }
+ if res.CommitNum == commitNum {
+ continue
+ }
+ parsed := res.ParseData()
+ if builder != "" {
+ // Comparing for a particular builder.
+ // This is used in perf_changes and in email notifications.
+ b := parsed[builder]
+ if b == nil || b["meta-done"] == nil {
+ // No results yet, must not do the comparison.
+ return nil, nil
+ }
+ if b["meta-done"].OK {
+ // Have complete results, compare.
+ return res, nil
+ }
+ } else {
+ // Comparing for all builders, find a result with at least
+ // one successful meta-done.
+ // This is used in perf_detail.
+ for _, benchs := range parsed {
+ if data := benchs["meta-done"]; data != nil && data.OK {
+ return res, nil
+ }
+ }
+ }
+ // Failed, try next result.
+ commitNum = res.CommitNum
+ }
+}
+
+type PerfChange struct {
+ Builder string
+ Bench string
+ Metric string
+ Old uint64
+ New uint64
+ Diff float64
+}
+
+func significantPerfChanges(pc *PerfConfig, builder string, prevRes, res *PerfResult) (changes []*PerfChange) {
+ // First, collect all significant changes.
+ for builder1, benchmarks1 := range res.ParseData() {
+ if builder != "" && builder != builder1 {
+ // This is not the builder you're looking for, Luke.
+ continue
+ }
+ benchmarks0 := prevRes.ParseData()[builder1]
+ if benchmarks0 == nil {
+ continue
+ }
+ for benchmark, data1 := range benchmarks1 {
+ data0 := benchmarks0[benchmark]
+ if data0 == nil {
+ continue
+ }
+ for metric, val := range data1.Metrics {
+ val0 := data0.Metrics[metric]
+ if val0 == 0 {
+ continue
+ }
+ diff := perfDiff(val0, val)
+ noise := pc.NoiseLevel(builder, benchmark, metric)
+ if isNoise(diff, noise) {
+ continue
+ }
+ ch := &PerfChange{Builder: builder, Bench: benchmark, Metric: metric, Old: val0, New: val, Diff: diff}
+ changes = append(changes, ch)
+ }
+ }
+ }
+ // Then, strip non-repeatable changes (flakes).
+ // The hypothesis is that a real change must show up with the majority of GOMAXPROCS values.
+ majority := len(pc.ProcList(builder))/2 + 1
+ cnt := make(map[string]int)
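+ // Count changes per "benchmark|metric" key, tracking improvements under a
+ // separate "--"-suffixed key; a change survives only if either count
+ // reaches the majority.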
+ for _, ch := range changes {
+ b, _ := splitBench(ch.Bench)
+ name := b + "|" + ch.Metric
+ if ch.Diff < 0 {
+ name += "--"
+ }
+ cnt[name] = cnt[name] + 1
+ }
+ for i := 0; i < len(changes); i++ {
+ ch := changes[i]
+ b, _ := splitBench(ch.Bench)
+ name := b + "|" + ch.Metric
+ if cnt[name] >= majority {
+ continue
+ }
+ if cnt[name+"--"] >= majority {
+ continue
+ }
+ // Remove flake.
+ last := len(changes) - 1
+ changes[i] = changes[last]
+ changes = changes[:last]
+ i--
+ }
+ return changes
+}
+
+// orderPerfTodo reorders commit nums for benchmarking todo.
+// The resulting order is somewhat tricky. We want 2 things:
+// 1. benchmark sequentially backwards (this provides information about the most
+// recent changes, and allows us to estimate noise levels)
+// 2. benchmark old commits in "scatter" order (this allows us to quickly gather
+// brief information about thousands of old commits)
+// So this function interleaves the two orders.
+func orderPerfTodo(nums []int) []int {
+ sort.Ints(nums)
+ n := len(nums)
+ pow2 := uint32(0) // exponent such that 1<<pow2 (npow2) is a power-of-two > n
+ npow2 := 0 // 1 << pow2
+ for npow2 <= n {
+ pow2++
+ npow2 = 1 << pow2
+ }
+ res := make([]int, n)
+ resPos := n - 1 // result array is filled backwards
+ present := make([]bool, n) // marks values that are already present in the result array
+ for i0, i1 := n-1, 0; i0 >= 0 || i1 < npow2; {
+ // i0 represents "benchmark sequentially backwards" sequence
+ // find the next commit that is not yet present and add it
+ for cnt := 0; cnt < 2; cnt++ {
+ for ; i0 >= 0; i0-- {
+ if !present[i0] {
+ present[i0] = true
+ res[resPos] = nums[i0]
+ resPos--
+ i0--
+ break
+ }
+ }
+ }
+ // i1 represents "scatter order" sequence
+ // find the next commit that is not yet present and add it
+ for ; i1 < npow2; i1++ {
+ // do the "recursive split-ordering" trick
+ idx := 0 // bitwise reverse of i1
+ for j := uint32(0); j <= pow2; j++ {
+ if (i1 & (1 << j)) != 0 {
+ idx = idx | (1 << (pow2 - j - 1))
+ }
+ }
+ if idx < n && !present[idx] {
+ present[idx] = true
+ res[resPos] = nums[idx]
+ resPos--
+ i1++
+ break
+ }
+ }
+ }
+ // The above is easy to get wrong, so do a sanity check: the result must be a permutation of nums.
+ res2 := make([]int, n)
+ copy(res2, res)
+ sort.Ints(res2)
+ for i := range res2 {
+ if res2[i] != nums[i] {
+ panic(fmt.Sprintf("diff at %v: expect %v, want %v\nwas: %v\n become: %v",
+ i, nums[i], res2[i], nums, res2))
+ }
+ }
+ return res
+}
diff --git a/llgo/third_party/go.tools/dashboard/app/build/perf_changes.go b/llgo/third_party/go.tools/dashboard/app/build/perf_changes.go
new file mode 100644
index 0000000000000000000000000000000000000000..4abbf1a4ddf376c3d71fa7812f537451c8cb621c
--- /dev/null
+++ b/llgo/third_party/go.tools/dashboard/app/build/perf_changes.go
@@ -0,0 +1,282 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build appengine
+
+package build
+
+import (
+ "bytes"
+ "fmt"
+ "html/template"
+ "net/http"
+ "sort"
+ "strconv"
+
+ "appengine"
+ "appengine/datastore"
+)
+
+func init() {
+ http.HandleFunc("/perf", perfChangesHandler)
+}
+
+// perfChangesHandler draws the main benchmarking page.
+func perfChangesHandler(w http.ResponseWriter, r *http.Request) {
+ d := dashboardForRequest(r)
+ c := d.Context(appengine.NewContext(r))
+
+ page, _ := strconv.Atoi(r.FormValue("page"))
+ if page < 0 {
+ page = 0
+ }
+
+ pc, err := GetPerfConfig(c, r)
+ if err != nil {
+ logErr(w, r, err)
+ return
+ }
+
+ commits, err := dashPerfCommits(c, page)
+ if err != nil {
+ logErr(w, r, err)
+ return
+ }
+
+ // Fetch PerfResult's for the commits.
+ var uiCommits []*perfChangesCommit
+ rc := MakePerfResultCache(c, commits[0], false)
+
+ // But first compare tip with the last release.
+ if page == 0 {
+ res0 := &PerfResult{CommitHash: knownTags[lastRelease]}
+ err := datastore.Get(c, res0.Key(c), res0)
+ if err != nil && err != datastore.ErrNoSuchEntity {
+ logErr(w, r, fmt.Errorf("getting PerfResult: %v", err))
+ return
+ }
+ if err != datastore.ErrNoSuchEntity {
+ uiCom, err := handleOneCommit(pc, commits[0], rc, res0)
+ if err != nil {
+ logErr(w, r, err)
+ return
+ }
+ uiCom.IsSummary = true
+ uiCom.ParentHash = lastRelease
+ uiCommits = append(uiCommits, uiCom)
+ }
+ }
+
+ for _, com := range commits {
+ uiCom, err := handleOneCommit(pc, com, rc, nil)
+ if err != nil {
+ logErr(w, r, err)
+ return
+ }
+ uiCommits = append(uiCommits, uiCom)
+ }
+
+ p := &Pagination{}
+ if len(commits) == commitsPerPage {
+ p.Next = page + 1
+ }
+ if page > 0 {
+ p.Prev = page - 1
+ p.HasPrev = true
+ }
+
+ data := &perfChangesData{d, p, uiCommits}
+
+ var buf bytes.Buffer
+ if err := perfChangesTemplate.Execute(&buf, data); err != nil {
+ logErr(w, r, err)
+ return
+ }
+
+ buf.WriteTo(w)
+}
+
+func handleOneCommit(pc *PerfConfig, com *Commit, rc *PerfResultCache, baseRes *PerfResult) (*perfChangesCommit, error) {
+ uiCom := new(perfChangesCommit)
+ uiCom.Commit = com
+ res1 := rc.Get(com.Num)
+ for builder, benchmarks1 := range res1.ParseData() {
+ for benchmark, data1 := range benchmarks1 {
+ if benchmark != "meta-done" || !data1.OK {
+ uiCom.NumResults++
+ }
+ if !data1.OK {
+ v := new(perfChangesChange)
+ v.diff = 10000
+ v.Style = "fail"
+ v.Builder = builder
+ v.Link = fmt.Sprintf("log/%v", data1.Artifacts["log"])
+ v.Val = builder
+ v.Hint = builder
+ if benchmark != "meta-done" {
+ v.Hint += "/" + benchmark
+ }
+ m := findMetric(uiCom, "failure")
+ m.BadChanges = append(m.BadChanges, v)
+ }
+ }
+ res0 := baseRes
+ if res0 == nil {
+ var err error
+ res0, err = rc.NextForComparison(com.Num, builder)
+ if err != nil {
+ return nil, err
+ }
+ if res0 == nil {
+ continue
+ }
+ }
+ changes := significantPerfChanges(pc, builder, res0, res1)
+ changes = dedupPerfChanges(changes)
+ for _, ch := range changes {
+ v := new(perfChangesChange)
+ v.Builder = builder
+ v.Benchmark, v.Procs = splitBench(ch.Bench)
+ v.diff = ch.Diff
+ v.Val = fmt.Sprintf("%+.2f%%", ch.Diff)
+ v.Hint = fmt.Sprintf("%v/%v", builder, ch.Bench)
+ v.Link = fmt.Sprintf("perfdetail?commit=%v&commit0=%v&builder=%v&benchmark=%v", com.Hash, res0.CommitHash, builder, v.Benchmark)
+ m := findMetric(uiCom, ch.Metric)
+ if v.diff > 0 {
+ v.Style = "bad"
+ m.BadChanges = append(m.BadChanges, v)
+ } else {
+ v.Style = "good"
+ m.GoodChanges = append(m.GoodChanges, v)
+ }
+ }
+ }
+
+ // Sort metrics and changes.
+ for _, m := range uiCom.Metrics {
+ sort.Sort(m.GoodChanges)
+ sort.Sort(m.BadChanges)
+ }
+ sort.Sort(uiCom.Metrics)
+ // Need at least one metric for UI.
+ if len(uiCom.Metrics) == 0 {
+ uiCom.Metrics = append(uiCom.Metrics, &perfChangesMetric{})
+ }
+ uiCom.Metrics[0].First = true
+ return uiCom, nil
+}
+
+// dedupPerfChanges finds the builder-procs with the maximum absolute diff for
+// every benchmark-metric pair and drops the rest.
+func dedupPerfChanges(changes []*PerfChange) (deduped []*PerfChange) {
+ maxDiff := make(map[string]float64)
+ maxBench := make(map[string]string)
+ // First, find the maximum.
+ for _, ch := range changes {
+ bench, _ := splitBench(ch.Bench)
+ k := bench + "|" + ch.Metric
+ v := ch.Diff
+ if v < 0 {
+ v = -v
+ }
+ if maxDiff[k] < v {
+ maxDiff[k] = v
+ maxBench[k] = ch.Builder + "|" + ch.Bench
+ }
+ }
+ // Then, remove the rest.
+ for _, ch := range changes {
+ bench, _ := splitBench(ch.Bench)
+ k := bench + "|" + ch.Metric
+ if maxBench[k] == ch.Builder+"|"+ch.Bench {
+ deduped = append(deduped, ch)
+ }
+ }
+ return
+}
+
+func findMetric(c *perfChangesCommit, metric string) *perfChangesMetric {
+ for _, m := range c.Metrics {
+ if m.Name == metric {
+ return m
+ }
+ }
+ m := new(perfChangesMetric)
+ m.Name = metric
+ c.Metrics = append(c.Metrics, m)
+ return m
+}
+
+type uiPerfConfig struct {
+ Builders []uiPerfConfigElem
+ Benchmarks []uiPerfConfigElem
+ Metrics []uiPerfConfigElem
+ Procs []uiPerfConfigElem
+ CommitsFrom []uiPerfConfigElem
+ CommitsTo []uiPerfConfigElem
+}
+
+type uiPerfConfigElem struct {
+ Name string
+ Selected bool
+}
+
+var perfChangesTemplate = template.Must(
+ template.New("perf_changes.html").Funcs(tmplFuncs).ParseFiles("build/perf_changes.html"),
+)
+
+type perfChangesData struct {
+ Dashboard *Dashboard
+ Pagination *Pagination
+ Commits []*perfChangesCommit
+}
+
+type perfChangesCommit struct {
+ *Commit
+ IsSummary bool
+ NumResults int
+ Metrics perfChangesMetricSlice
+}
+
+type perfChangesMetric struct {
+ Name string
+ First bool
+ BadChanges perfChangesChangeSlice
+ GoodChanges perfChangesChangeSlice
+}
+
+type perfChangesChange struct {
+ Builder string
+ Benchmark string
+ Link string
+ Hint string
+ Style string
+ Val string
+ Procs int
+ diff float64
+}
+
+type perfChangesMetricSlice []*perfChangesMetric
+
+func (l perfChangesMetricSlice) Len() int { return len(l) }
+func (l perfChangesMetricSlice) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
+func (l perfChangesMetricSlice) Less(i, j int) bool {
+ if l[i].Name == "failure" || l[j].Name == "failure" {
+ return l[i].Name == "failure"
+ }
+ return l[i].Name < l[j].Name
+}
+
+type perfChangesChangeSlice []*perfChangesChange
+
+func (l perfChangesChangeSlice) Len() int { return len(l) }
+func (l perfChangesChangeSlice) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
+func (l perfChangesChangeSlice) Less(i, j int) bool {
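+ // Each slice holds only same-signed diffs (BadChanges are positive,
+ // GoodChanges negative), so mixed signs indicate a bug upstream.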
+ vi, vj := l[i].diff, l[j].diff
+ if vi > 0 && vj > 0 {
+ return vi > vj
+ } else if vi < 0 && vj < 0 {
+ return vi < vj
+ } else {
+ panic("comparing positive and negative diff")
+ }
+}
diff --git a/llgo/third_party/go.tools/dashboard/app/build/perf_changes.html b/llgo/third_party/go.tools/dashboard/app/build/perf_changes.html
new file mode 100644
index 0000000000000000000000000000000000000000..24f0534d95df72d960c4fc7228477612f7e6cb44
--- /dev/null
+++ b/llgo/third_party/go.tools/dashboard/app/build/perf_changes.html
@@ -0,0 +1,89 @@
+
+
+
+ {{$.Dashboard.Name}} Dashboard
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ {{range $c := $.Commits}}
+ {{range $m := $c.Metrics}}
+ {{if $m.First}}
+
+ {{if $c.IsSummary}}
+ tip vs {{$c.ParentHash}}
+ {{else}}
+ {{shortHash $c.Hash}}
+ {{end}}
+ {{$c.NumResults}}
+ {{else}}
+
+
+
+ {{end}}
+ {{$m.Name}}
+
+ {{range $ch := $m.BadChanges}}
+ {{$ch.Val}}
+ {{end}}
+
+
+ {{range $ch := $m.GoodChanges}}
+ {{$ch.Val}}
+ {{end}}
+
+ {{if $m.First}}
+ {{shortUser $c.User}}
+ {{$c.Time.Format "Mon 02 Jan 15:04"}}
+ {{shortDesc $c.Desc}}
+ {{else}}
+
+
+
+ {{end}}
+
+ {{end}}
+ {{if $c.IsSummary}}
+ ---
+ {{end}}
+ {{end}}
+
+
+
+ {{with $.Pagination}}
+
+ {{end}}
+
+
+
+
+
+
diff --git a/llgo/third_party/go.tools/dashboard/app/build/perf_detail.go b/llgo/third_party/go.tools/dashboard/app/build/perf_detail.go
new file mode 100644
index 0000000000000000000000000000000000000000..f8d9bfda9ede8b77952aaf19db2e2eca615e8a13
--- /dev/null
+++ b/llgo/third_party/go.tools/dashboard/app/build/perf_detail.go
@@ -0,0 +1,221 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build appengine
+
+package build
+
+import (
+ "bytes"
+ "fmt"
+ "html/template"
+ "net/http"
+ "sort"
+ "strconv"
+ "strings"
+
+ "appengine"
+ "appengine/datastore"
+)
+
+func init() {
+ for _, d := range dashboards {
+ http.HandleFunc(d.RelPath+"perfdetail", perfDetailUIHandler)
+ }
+}
+
+func perfDetailUIHandler(w http.ResponseWriter, r *http.Request) {
+ d := dashboardForRequest(r)
+ c := d.Context(appengine.NewContext(r))
+ pc, err := GetPerfConfig(c, r)
+ if err != nil {
+ logErr(w, r, err)
+ return
+ }
+
+ kind := r.FormValue("kind")
+ builder := r.FormValue("builder")
+ benchmark := r.FormValue("benchmark")
+ if kind == "" {
+ kind = "benchmark"
+ }
+ if kind != "benchmark" && kind != "builder" {
+ logErr(w, r, fmt.Errorf("unknown kind %s", kind))
+ return
+ }
+
+ // Fetch the new commit.
+ com1 := new(Commit)
+ com1.Hash = r.FormValue("commit")
+ if hash, ok := knownTags[com1.Hash]; ok {
+ com1.Hash = hash
+ }
+ if err := datastore.Get(c, com1.Key(c), com1); err != nil {
+ logErr(w, r, fmt.Errorf("failed to fetch commit %s: %v", com1.Hash, err))
+ return
+ }
+ // Fetch the associated perf result.
+ ress1 := &PerfResult{CommitHash: com1.Hash}
+ if err := datastore.Get(c, ress1.Key(c), ress1); err != nil {
+ logErr(w, r, fmt.Errorf("failed to fetch perf result %s: %v", com1.Hash, err))
+ return
+ }
+
+ // Fetch the old commit.
+ var ress0 *PerfResult
+ com0 := new(Commit)
+ com0.Hash = r.FormValue("commit0")
+ if hash, ok := knownTags[com0.Hash]; ok {
+ com0.Hash = hash
+ }
+ if com0.Hash != "" {
+ // Have an exact commit hash, fetch directly.
+ if err := datastore.Get(c, com0.Key(c), com0); err != nil {
+ logErr(w, r, fmt.Errorf("failed to fetch commit %s: %v", com0.Hash, err))
+ return
+ }
+ ress0 = &PerfResult{CommitHash: com0.Hash}
+ if err := datastore.Get(c, ress0.Key(c), ress0); err != nil {
+ logErr(w, r, fmt.Errorf("failed to fetch perf result for %s: %v", com0.Hash, err))
+ return
+ }
+ } else {
+ // Don't have the commit hash, find the previous commit to compare.
+ rc := MakePerfResultCache(c, com1, false)
+ ress0, err = rc.NextForComparison(com1.Num, "")
+ if err != nil {
+ logErr(w, r, err)
+ return
+ }
+ if ress0 == nil {
+ logErr(w, r, fmt.Errorf("no previous commit with results"))
+ return
+ }
+ // Now that we know the right result, fetch the commit.
+ com0.Hash = ress0.CommitHash
+ if err := datastore.Get(c, com0.Key(c), com0); err != nil {
+ logErr(w, r, fmt.Errorf("failed to fetch commit %s: %v", com0.Hash, err))
+ return
+ }
+ }
+
+ res0 := ress0.ParseData()
+ res1 := ress1.ParseData()
+ var benchmarks []*uiPerfDetailBenchmark
+ var list []string
+ if kind == "builder" {
+ list = pc.BenchmarksForBuilder(builder)
+ } else {
+ list = pc.BuildersForBenchmark(benchmark)
+ }
+ for _, other := range list {
+ if kind == "builder" {
+ benchmark = other
+ } else {
+ builder = other
+ }
+ var procs []*uiPerfDetailProcs
+ allProcs := pc.ProcList(builder)
+ for _, p := range allProcs {
+ BenchProcs := fmt.Sprintf("%v-%v", benchmark, p)
+ if res0[builder] == nil || res0[builder][BenchProcs] == nil {
+ continue
+ }
+ pp := &uiPerfDetailProcs{Procs: p}
+ for metric, val := range res0[builder][BenchProcs].Metrics {
+ var pm uiPerfDetailMetric
+ pm.Name = metric
+ pm.Val0 = fmt.Sprintf("%v", val)
+ val1 := uint64(0)
+ if res1[builder] != nil && res1[builder][BenchProcs] != nil {
+ val1 = res1[builder][BenchProcs].Metrics[metric]
+ }
+ pm.Val1 = fmt.Sprintf("%v", val1)
+ v0 := val
+ v1 := val1
+ valf := perfDiff(v0, v1)
+ pm.Delta = fmt.Sprintf("%+.2f%%", valf)
+ pm.Style = perfChangeStyle(pc, valf, builder, BenchProcs, pm.Name)
+ pp.Metrics = append(pp.Metrics, pm)
+ }
+ sort.Sort(pp.Metrics)
+ for artifact, hash := range res0[builder][BenchProcs].Artifacts {
+ var pm uiPerfDetailMetric
+ pm.Val0 = fmt.Sprintf("%v", artifact)
+ pm.Link0 = fmt.Sprintf("log/%v", hash)
+ pm.Val1 = fmt.Sprintf("%v", artifact)
+ if res1[builder] != nil && res1[builder][BenchProcs] != nil && res1[builder][BenchProcs].Artifacts[artifact] != "" {
+ pm.Link1 = fmt.Sprintf("log/%v", res1[builder][BenchProcs].Artifacts[artifact])
+ }
+ pp.Metrics = append(pp.Metrics, pm)
+ }
+ procs = append(procs, pp)
+ }
+ benchmarks = append(benchmarks, &uiPerfDetailBenchmark{other, procs})
+ }
+
+ cfg := new(uiPerfConfig)
+ for _, v := range pc.BuildersForBenchmark("") {
+ cfg.Builders = append(cfg.Builders, uiPerfConfigElem{v, v == builder})
+ }
+ for _, v := range pc.BenchmarksForBuilder("") {
+ cfg.Benchmarks = append(cfg.Benchmarks, uiPerfConfigElem{v, v == benchmark})
+ }
+
+ data := &uiPerfDetailTemplateData{d, cfg, kind == "builder", com0, com1, benchmarks}
+
+ var buf bytes.Buffer
+ if err := uiPerfDetailTemplate.Execute(&buf, data); err != nil {
+ logErr(w, r, err)
+ return
+ }
+
+ buf.WriteTo(w)
+}
+
+func perfResultSplit(s string) (builder string, benchmark string, procs int) {
+ s1 := strings.Split(s, "|")
+ s2 := strings.Split(s1[1], "-")
+ procs, _ = strconv.Atoi(s2[1])
+ return s1[0], s2[0], procs
+}
+
+type uiPerfDetailTemplateData struct {
+ Dashboard *Dashboard
+ Config *uiPerfConfig
+ KindBuilder bool
+ Commit0 *Commit
+ Commit1 *Commit
+ Benchmarks []*uiPerfDetailBenchmark
+}
+
+type uiPerfDetailBenchmark struct {
+ Name string
+ Procs []*uiPerfDetailProcs
+}
+
+type uiPerfDetailProcs struct {
+ Procs int
+ Metrics uiPerfDetailMetrics
+}
+
+type uiPerfDetailMetric struct {
+ Name string
+ Val0 string
+ Val1 string
+ Link0 string
+ Link1 string
+ Delta string
+ Style string
+}
+
+type uiPerfDetailMetrics []uiPerfDetailMetric
+
+func (l uiPerfDetailMetrics) Len() int { return len(l) }
+func (l uiPerfDetailMetrics) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
+func (l uiPerfDetailMetrics) Less(i, j int) bool { return l[i].Name < l[j].Name }
+
+var uiPerfDetailTemplate = template.Must(
+ template.New("perf_detail.html").Funcs(tmplFuncs).ParseFiles("build/perf_detail.html"),
+)
diff --git a/llgo/third_party/go.tools/dashboard/app/build/perf_detail.html b/llgo/third_party/go.tools/dashboard/app/build/perf_detail.html
new file mode 100644
index 0000000000000000000000000000000000000000..18b30283cdca353f085acb764ba77028cf1b53ab
--- /dev/null
+++ b/llgo/third_party/go.tools/dashboard/app/build/perf_detail.html
@@ -0,0 +1,101 @@
+
+
+
+ {{$.Dashboard.Name}} Dashboard
+
+
+
+
+
+
+
+
+
+
+
+ {{range $b := $.Benchmarks}}
+
+
{{$b.Name}}
+ {{range $p := $b.Procs}}
+
+
GOMAXPROCS={{$p.Procs}}
+
+
+
+ Metric
+ old
+ new
+ delta
+
+
+
+ {{range $m := $p.Metrics}}
+
+ {{$m.Name}}
+ {{if $m.Link0}}
+ {{$m.Val0}}
+ {{else}}
+ {{$m.Val0}}
+ {{end}}
+ {{if $m.Link1}}
+ {{$m.Val1}}
+ {{else}}
+ {{$m.Val1}}
+ {{end}}
+ {{$m.Delta}}
+
+ {{end}}
+
+
+
+ {{end}}
+
+ {{end}}
+
+
+
+
+
+
+
diff --git a/llgo/third_party/go.tools/dashboard/app/build/perf_graph.go b/llgo/third_party/go.tools/dashboard/app/build/perf_graph.go
new file mode 100644
index 0000000000000000000000000000000000000000..81eb5e1621df95721076f3bb3f2906550c09457b
--- /dev/null
+++ b/llgo/third_party/go.tools/dashboard/app/build/perf_graph.go
@@ -0,0 +1,270 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build appengine
+
+package build
+
+import (
+ "bytes"
+ "fmt"
+ "html/template"
+ "net/http"
+ "strconv"
+
+ "appengine"
+ "appengine/datastore"
+)
+
+func init() {
+ for _, d := range dashboards {
+ http.HandleFunc(d.RelPath+"perfgraph", perfGraphHandler)
+ }
+}
+
+func perfGraphHandler(w http.ResponseWriter, r *http.Request) {
+ d := dashboardForRequest(r)
+ c := d.Context(appengine.NewContext(r))
+ pc, err := GetPerfConfig(c, r)
+ if err != nil {
+ logErr(w, r, err)
+ return
+ }
+ allBuilders := pc.BuildersForBenchmark("")
+ allBenchmarks := pc.BenchmarksForBuilder("")
+ allMetrics := pc.MetricsForBenchmark("")
+ allProcs := pc.ProcList("")
+ r.ParseForm()
+ selBuilders := r.Form["builder"]
+ selBenchmarks := r.Form["benchmark"]
+ selMetrics := r.Form["metric"]
+ selProcs := r.Form["procs"]
+ if len(selBuilders) == 0 {
+ selBuilders = append(selBuilders, allBuilders[0])
+ }
+ if len(selBenchmarks) == 0 {
+ selBenchmarks = append(selBenchmarks, "json")
+ }
+ if len(selMetrics) == 0 {
+ selMetrics = append(selMetrics, "time")
+ }
+ if len(selProcs) == 0 {
+ selProcs = append(selProcs, "1")
+ }
+ commitFrom := r.FormValue("commit-from")
+ if commitFrom == "" {
+ commitFrom = lastRelease
+ }
+ commitTo := r.FormValue("commit-to")
+ if commitTo == "" {
+ commitTo = "tip"
+ }
+ // TODO(dvyukov): validate input
+
+ // Figure out start and end commit from commitFrom/commitTo.
+ startCommitNum := 0
+ endCommitNum := 0
+ {
+ comFrom := &Commit{Hash: knownTags[commitFrom]}
+ if err := datastore.Get(c, comFrom.Key(c), comFrom); err != nil {
+ logErr(w, r, err)
+ return
+ }
+ startCommitNum = comFrom.Num
+
+ retry:
+ if commitTo == "tip" {
+ p, err := GetPackage(c, "")
+ if err != nil {
+ logErr(w, r, err)
+ return
+ }
+ endCommitNum = p.NextNum
+ } else {
+ comTo := &Commit{Hash: knownTags[commitTo]}
+ if err := datastore.Get(c, comTo.Key(c), comTo); err != nil {
+ logErr(w, r, err)
+ return
+ }
+ endCommitNum = comTo.Num + 1
+ }
+ if endCommitNum <= startCommitNum {
+ // User probably selected something like from:go1.3 to:go1.2. Reset commitTo to tip and retry.
+ if commitTo == "tip" {
+ logErr(w, r, fmt.Errorf("no commits to display (%v-%v)", commitFrom, commitTo))
+ return
+ }
+ commitTo = "tip"
+ goto retry
+ }
+ }
+ commitsToDisplay := endCommitNum - startCommitNum
+
+ present := func(set []string, s string) bool {
+ for _, s1 := range set {
+ if s1 == s {
+ return true
+ }
+ }
+ return false
+ }
+
+ cfg := &uiPerfConfig{}
+ for _, v := range allBuilders {
+ cfg.Builders = append(cfg.Builders, uiPerfConfigElem{v, present(selBuilders, v)})
+ }
+ for _, v := range allBenchmarks {
+ cfg.Benchmarks = append(cfg.Benchmarks, uiPerfConfigElem{v, present(selBenchmarks, v)})
+ }
+ for _, v := range allMetrics {
+ cfg.Metrics = append(cfg.Metrics, uiPerfConfigElem{v, present(selMetrics, v)})
+ }
+ for _, v := range allProcs {
+ cfg.Procs = append(cfg.Procs, uiPerfConfigElem{strconv.Itoa(v), present(selProcs, strconv.Itoa(v))})
+ }
+ for k := range knownTags {
+ cfg.CommitsFrom = append(cfg.CommitsFrom, uiPerfConfigElem{k, commitFrom == k})
+ }
+ for k := range knownTags {
+ cfg.CommitsTo = append(cfg.CommitsTo, uiPerfConfigElem{k, commitTo == k})
+ }
+ cfg.CommitsTo = append(cfg.CommitsTo, uiPerfConfigElem{"tip", commitTo == "tip"})
+
+ var vals [][]float64
+ var hints [][]string
+ var annotations [][]string
+ var certainty [][]bool
+ var headers []string
+ commits2, err := GetCommits(c, startCommitNum, commitsToDisplay)
+ if err != nil {
+ logErr(w, r, err)
+ return
+ }
+ for _, builder := range selBuilders {
+ for _, metric := range selMetrics {
+ for _, benchmark := range selBenchmarks {
+ for _, procs := range selProcs {
+ benchProcs := fmt.Sprintf("%v-%v", benchmark, procs)
+ vv, err := GetPerfMetricsForCommits(c, builder, benchProcs, metric, startCommitNum, commitsToDisplay)
+ if err != nil {
+ logErr(w, r, err)
+ return
+ }
+ hasdata := false
+ for _, v := range vv {
+ if v != 0 {
+ hasdata = true
+ }
+ }
+ if hasdata {
+ noise := pc.NoiseLevel(builder, benchProcs, metric)
+ descBuilder := "/" + builder
+ descBenchmark := "/" + benchProcs
+ descMetric := "/" + metric
+ if len(selBuilders) == 1 {
+ descBuilder = ""
+ }
+ if len(selBenchmarks) == 1 && len(selProcs) == 1 {
+ descBenchmark = ""
+ }
+ if len(selMetrics) == 1 && (len(selBuilders) > 1 || len(selBenchmarks) > 1 || len(selProcs) > 1) {
+ descMetric = ""
+ }
+ desc := fmt.Sprintf("%v%v%v", descBuilder, descBenchmark, descMetric)[1:]
+ hh := make([]string, commitsToDisplay)
+ ann := make([]string, commitsToDisplay)
+ valf := make([]float64, commitsToDisplay)
+ cert := make([]bool, commitsToDisplay)
+ firstval := uint64(0)
+ lastval := uint64(0)
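+ // Normalize each series to its first non-zero sample so different metrics share one scale; for commits with no data, carry the last value forward and mark the point as uncertain.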
+ for i, v := range vv {
+ cert[i] = true
+ if v == 0 {
+ if lastval == 0 {
+ continue
+ }
+ cert[i] = false
+ v = lastval
+ }
+ if firstval == 0 {
+ firstval = v
+ }
+ valf[i] = float64(v) / float64(firstval)
+ if cert[i] {
+ d := ""
+ if lastval != 0 {
+ diff := perfDiff(lastval, v)
+ d = fmt.Sprintf(" (%+.02f%%)", diff)
+ if !isNoise(diff, noise) {
+ ann[i] = fmt.Sprintf("%+.02f%%", diff)
+ }
+ }
+ hh[i] = fmt.Sprintf("%v%v", v, d)
+ } else {
+ hh[i] = "NO DATA"
+ }
+ lastval = v
+ }
+ vals = append(vals, valf)
+ hints = append(hints, hh)
+ annotations = append(annotations, ann)
+ certainty = append(certainty, cert)
+ headers = append(headers, desc)
+ }
+ }
+ }
+ }
+ }
+
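+ // Build one perfGraphCommit per benchmarked commit, carrying the normalized value, certainty flag, hover hint and annotation for every selected series.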
+ var commits []perfGraphCommit
+ if len(vals) != 0 && len(vals[0]) != 0 {
+ idx := 0
+ for i := range vals[0] {
+ com := commits2[i]
+ if com == nil || !com.NeedsBenchmarking {
+ continue
+ }
+ c := perfGraphCommit{Id: idx, Name: fmt.Sprintf("%v (%v)", com.Desc, com.Time.Format("Jan 2, 2006 15:04"))}
+ idx++
+ for j := range vals {
+ c.Vals = append(c.Vals, perfGraphValue{float64(vals[j][i]), certainty[j][i], hints[j][i], annotations[j][i]})
+ }
+ commits = append(commits, c)
+ }
+ }
+
+ data := &perfGraphData{d, cfg, headers, commits}
+
+ var buf bytes.Buffer
+ if err := perfGraphTemplate.Execute(&buf, data); err != nil {
+ logErr(w, r, err)
+ return
+ }
+
+ buf.WriteTo(w)
+}
+
+var perfGraphTemplate = template.Must(
+ template.New("perf_graph.html").ParseFiles("build/perf_graph.html"),
+)
+
+type perfGraphData struct {
+ Dashboard *Dashboard
+ Config *uiPerfConfig
+ Headers []string
+ Commits []perfGraphCommit
+}
+
+type perfGraphCommit struct {
+ Id int
+ Name string
+ Vals []perfGraphValue
+}
+
+type perfGraphValue struct {
+ Val float64
+ Certainty bool
+ Hint string
+ Ann string
+}
diff --git a/llgo/third_party/go.tools/dashboard/app/build/perf_graph.html b/llgo/third_party/go.tools/dashboard/app/build/perf_graph.html
new file mode 100644
index 0000000000000000000000000000000000000000..da1c0d000d0d0f91dc2d65de686fad060e8ae657
--- /dev/null
+++ b/llgo/third_party/go.tools/dashboard/app/build/perf_graph.html
@@ -0,0 +1,120 @@
+
+
+
+ {{$.Dashboard.Name}} Dashboard
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/llgo/third_party/go.tools/dashboard/app/build/perf_learn.go b/llgo/third_party/go.tools/dashboard/app/build/perf_learn.go
new file mode 100644
index 0000000000000000000000000000000000000000..683ba60b09b5cb38042aff45d8ff53243d5adb5a
--- /dev/null
+++ b/llgo/third_party/go.tools/dashboard/app/build/perf_learn.go
@@ -0,0 +1,186 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build appengine
+
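+ // chooseBenchmark (defined elsewhere in this package) picks the next benchmark and GOMAXPROCS value to run; last reports whether this commit's benchmark list is now exhausted.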
+package build
+
+import (
+ "bytes"
+ "fmt"
+ "html/template"
+ "net/http"
+ "sort"
+
+ "appengine"
+ "appengine/datastore"
+)
+
+func init() {
+ http.HandleFunc("/perflearn", perfLearnHandler)
+}
+
+const (
+ learnPercentile = 0.95
+ learnSignalMultiplier = 1.1
+ learnMinSignal = 0.5
+)
+
+func perfLearnHandler(w http.ResponseWriter, r *http.Request) {
+ d := dashboardForRequest(r)
+ c := d.Context(appengine.NewContext(r))
+
+ pc, err := GetPerfConfig(c, r)
+ if err != nil {
+ logErr(w, r, err)
+ return
+ }
+
+ p, err := GetPackage(c, "")
+ if err != nil {
+ logErr(w, r, err)
+ return
+ }
+
+ update := r.FormValue("update") != ""
+ noise := make(map[string]string)
+
+ data := &perfLearnData{}
+
+ commits, err := GetCommits(c, 0, p.NextNum)
+ if err != nil {
+ logErr(w, r, err)
+ return
+ }
+
+ for _, builder := range pc.BuildersForBenchmark("") {
+ for _, benchmark := range pc.BenchmarksForBuilder(builder) {
+ for _, metric := range pc.MetricsForBenchmark(benchmark) {
+ for _, procs := range pc.ProcList(builder) {
+ values, err := GetPerfMetricsForCommits(c, builder, fmt.Sprintf("%v-%v", benchmark, procs), metric, 0, p.NextNum)
+ if err != nil {
+ logErr(w, r, err)
+ return
+ }
+ var dd []float64
+ last := uint64(0)
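+ // Collect the percentage change between consecutive non-zero samples; the larger value always goes in the numerator, so every recorded delta is non-negative.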
+ for i, v := range values {
+ if v == 0 {
+ if com := commits[i]; com == nil || com.NeedsBenchmarking {
+ last = 0
+ }
+ continue
+ }
+ if last != 0 {
+ v1 := v
+ if v1 < last {
+ v1, last = last, v1
+ }
+ diff := float64(v1)/float64(last)*100 - 100
+ dd = append(dd, diff)
+ }
+ last = v
+ }
+ if len(dd) == 0 {
+ continue
+ }
+ sort.Float64s(dd)
+
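+ // Take the 95th percentile of the sorted deltas as the noise base, then scale it up (with a minimum floor) to obtain the signal threshold.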
+ baseIdx := int(float64(len(dd)) * learnPercentile)
+ baseVal := dd[baseIdx]
+ signalVal := baseVal * learnSignalMultiplier
+ if signalVal < learnMinSignal {
+ signalVal = learnMinSignal
+ }
+ signalIdx := -1
+ noiseNum := 0
+ signalNum := 0
+
+ var diffs []*perfLearnDiff
+ for i, d := range dd {
+ if d > 3*signalVal {
+ d = 3 * signalVal
+ }
+ diffs = append(diffs, &perfLearnDiff{Num: i, Val: d})
+ if signalIdx == -1 && d >= signalVal {
+ signalIdx = i
+ }
+ if d < signalVal {
+ noiseNum++
+ } else {
+ signalNum++
+ }
+ }
+ diffs[baseIdx].Hint = "95%"
+ if signalIdx != -1 {
+ diffs[signalIdx].Hint = "signal"
+ }
+ diffs = diffs[len(diffs)*4/5:]
+ name := fmt.Sprintf("%v/%v-%v/%v", builder, benchmark, procs, metric)
+ data.Entries = append(data.Entries, &perfLearnEntry{len(data.Entries), name, baseVal, noiseNum, signalVal, signalNum, diffs})
+
+ if len(dd) >= 100 || r.FormValue("force") != "" {
+ nname := fmt.Sprintf("%v|%v-%v", builder, benchmark, procs)
+ n := noise[nname] + fmt.Sprintf("|%v=%.2f", metric, signalVal)
+ noise[nname] = n
+ }
+ }
+ }
+ }
+ }
+
+ if update {
+ var noiseLevels []string
+ for k, v := range noise {
+ noiseLevels = append(noiseLevels, k+v)
+ }
+ tx := func(c appengine.Context) error {
+ pc, err := GetPerfConfig(c, r)
+ if err != nil {
+ return err
+ }
+ pc.NoiseLevels = noiseLevels
+ if _, err := datastore.Put(c, PerfConfigKey(c), pc); err != nil {
+ return fmt.Errorf("putting PerfConfig: %v", err)
+ }
+ return nil
+ }
+ if err := datastore.RunInTransaction(c, tx, nil); err != nil {
+ logErr(w, r, err)
+ return
+ }
+ }
+
+ var buf bytes.Buffer
+ if err := perfLearnTemplate.Execute(&buf, data); err != nil {
+ logErr(w, r, err)
+ return
+ }
+
+ buf.WriteTo(w)
+}
+
+var perfLearnTemplate = template.Must(
+ template.New("perf_learn.html").Funcs(tmplFuncs).ParseFiles("build/perf_learn.html"),
+)
+
+type perfLearnData struct {
+ Entries []*perfLearnEntry
+}
+
+type perfLearnEntry struct {
+ Num int
+ Name string
+ BaseVal float64
+ NoiseNum int
+ SignalVal float64
+ SignalNum int
+ Diffs []*perfLearnDiff
+}
+
+type perfLearnDiff struct {
+ Num int
+ Val float64
+ Hint string
+}
diff --git a/llgo/third_party/go.tools/dashboard/app/build/perf_learn.html b/llgo/third_party/go.tools/dashboard/app/build/perf_learn.html
new file mode 100644
index 0000000000000000000000000000000000000000..294e957b6dbb05990a83335ed3a21a19e8d17ba6
--- /dev/null
+++ b/llgo/third_party/go.tools/dashboard/app/build/perf_learn.html
@@ -0,0 +1,45 @@
+
+
+
+
+
+
+
+
+ {{range $.Entries}}
+
+ {{.Name}}: base={{printf "%.2f[%d]" .BaseVal .NoiseNum}} signal={{printf "%.2f[%d]" .SignalVal .SignalNum}}
+
+
+ {{end}}
+
+
diff --git a/llgo/third_party/go.tools/dashboard/app/build/perf_notify.txt b/llgo/third_party/go.tools/dashboard/app/build/perf_notify.txt
new file mode 100644
index 0000000000000000000000000000000000000000..c5e8ebe2cd76e07a106fd3737b038b64a8258284
--- /dev/null
+++ b/llgo/third_party/go.tools/dashboard/app/build/perf_notify.txt
@@ -0,0 +1,13 @@
+{{if .Commit}}Change {{shortHash .Commit.Hash}} caused perf changes on {{.Builder}}:
+
+{{.Commit.Desc}}
+
+http://code.google.com/p/go/source/detail?r={{shortHash .Commit.Hash}}
+{{else}}This change caused perf changes on {{.Builder}}:
+{{end}}
+{{range $b := .Benchmarks}}
+{{printf "%-16s %12s %12s %10s" $b.Name "old" "new" "delta"}}
+{{range $m := $b.Metrics}}{{printf "%-16s %12v %12v %+10.2f" $m.Name $m.Old $m.New $m.Delta}}
+{{end}}{{end}}
+{{.Url}}
+
diff --git a/llgo/third_party/go.tools/dashboard/app/build/test.go b/llgo/third_party/go.tools/dashboard/app/build/test.go
new file mode 100644
index 0000000000000000000000000000000000000000..34a1c39fb16688512d0ef9bbe4894db29ffb1a09
--- /dev/null
+++ b/llgo/third_party/go.tools/dashboard/app/build/test.go
@@ -0,0 +1,378 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build appengine
+
+package build
+
+// TODO(adg): test authentication
+// TODO(adg): refactor to use appengine/aetest instead
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "net/http/httptest"
+ "net/url"
+ "strings"
+ "time"
+
+ "appengine"
+ "appengine/datastore"
+)
+
+func init() {
+ http.HandleFunc("/buildtest", testHandler)
+}
+
+var testEntityKinds = []string{
+ "Package",
+ "Commit",
+ "CommitRun",
+ "Result",
+ "PerfResult",
+ "PerfMetricRun",
+ "PerfConfig",
+ "PerfTodo",
+ "Log",
+}
+
+const testPkg = "golang.org/x/test"
+
+var testPackage = &Package{Name: "Test", Kind: "subrepo", Path: testPkg}
+
+var testPackages = []*Package{
+ {Name: "Go", Path: ""},
+ testPackage,
+}
+
+var tCommitTime = time.Now().Add(-time.Hour * 24 * 7)
+
+func tCommit(hash, parentHash, path string, bench bool) *Commit {
+ tCommitTime = tCommitTime.Add(time.Hour) // each commit should have a different time
+ return &Commit{
+ PackagePath: path,
+ Hash: hash,
+ ParentHash: parentHash,
+ Time: tCommitTime,
+ User: "adg",
+ Desc: "change description " + hash,
+ NeedsBenchmarking: bench,
+ }
+}
+
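+// testRequests is a table of dashboard API calls that testHandler replays in order; each entry gives the request path, query values, JSON request body, and the expected response.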
+var testRequests = []struct {
+ path string
+ vals url.Values
+ req interface{}
+ res interface{}
+}{
+ // Packages
+ {"/packages", url.Values{"kind": {"subrepo"}}, nil, []*Package{testPackage}},
+
+ // Go repo
+ {"/commit", nil, tCommit("0001", "0000", "", true), nil},
+ {"/commit", nil, tCommit("0002", "0001", "", false), nil},
+ {"/commit", nil, tCommit("0003", "0002", "", true), nil},
+ {"/todo", url.Values{"kind": {"build-go-commit"}, "builder": {"linux-386"}}, nil, &Todo{Kind: "build-go-commit", Data: &Commit{Hash: "0003"}}},
+ {"/todo", url.Values{"kind": {"build-go-commit"}, "builder": {"linux-amd64"}}, nil, &Todo{Kind: "build-go-commit", Data: &Commit{Hash: "0003"}}},
+ {"/result", nil, &Result{Builder: "linux-386", Hash: "0001", OK: true}, nil},
+ {"/todo", url.Values{"kind": {"build-go-commit"}, "builder": {"linux-386"}}, nil, &Todo{Kind: "build-go-commit", Data: &Commit{Hash: "0003"}}},
+ {"/result", nil, &Result{Builder: "linux-386", Hash: "0002", OK: true}, nil},
+ {"/todo", url.Values{"kind": {"build-go-commit"}, "builder": {"linux-386"}}, nil, &Todo{Kind: "build-go-commit", Data: &Commit{Hash: "0003"}}},
+
+ // Other builders, to test the UI.
+ {"/result", nil, &Result{Builder: "linux-amd64", Hash: "0001", OK: true}, nil},
+ {"/result", nil, &Result{Builder: "linux-amd64-race", Hash: "0001", OK: true}, nil},
+ {"/result", nil, &Result{Builder: "netbsd-386", Hash: "0001", OK: true}, nil},
+ {"/result", nil, &Result{Builder: "plan9-386", Hash: "0001", OK: true}, nil},
+ {"/result", nil, &Result{Builder: "windows-386", Hash: "0001", OK: true}, nil},
+ {"/result", nil, &Result{Builder: "windows-amd64", Hash: "0001", OK: true}, nil},
+ {"/result", nil, &Result{Builder: "windows-amd64-race", Hash: "0001", OK: true}, nil},
+ {"/result", nil, &Result{Builder: "linux-amd64-temp", Hash: "0001", OK: true}, nil},
+
+ // multiple builders
+ {"/todo", url.Values{"kind": {"build-go-commit"}, "builder": {"linux-amd64"}}, nil, &Todo{Kind: "build-go-commit", Data: &Commit{Hash: "0003"}}},
+ {"/result", nil, &Result{Builder: "linux-amd64", Hash: "0003", OK: true}, nil},
+ {"/todo", url.Values{"kind": {"build-go-commit"}, "builder": {"linux-386"}}, nil, &Todo{Kind: "build-go-commit", Data: &Commit{Hash: "0003"}}},
+ {"/todo", url.Values{"kind": {"build-go-commit"}, "builder": {"linux-amd64"}}, nil, &Todo{Kind: "build-go-commit", Data: &Commit{Hash: "0002"}}},
+
+ // branches
+ {"/commit", nil, tCommit("0004", "0003", "", false), nil},
+ {"/commit", nil, tCommit("0005", "0002", "", false), nil},
+ {"/todo", url.Values{"kind": {"build-go-commit"}, "builder": {"linux-386"}}, nil, &Todo{Kind: "build-go-commit", Data: &Commit{Hash: "0005"}}},
+ {"/result", nil, &Result{Builder: "linux-386", Hash: "0005", OK: true}, nil},
+ {"/todo", url.Values{"kind": {"build-go-commit"}, "builder": {"linux-386"}}, nil, &Todo{Kind: "build-go-commit", Data: &Commit{Hash: "0004"}}},
+ {"/result", nil, &Result{Builder: "linux-386", Hash: "0004", OK: false}, nil},
+ {"/todo", url.Values{"kind": {"build-go-commit"}, "builder": {"linux-386"}}, nil, &Todo{Kind: "build-go-commit", Data: &Commit{Hash: "0003"}}},
+
+ // logs
+ {"/result", nil, &Result{Builder: "linux-386", Hash: "0003", OK: false, Log: "test"}, nil},
+ {"/log/a94a8fe5ccb19ba61c4c0873d391e987982fbbd3", nil, nil, "test"},
+ {"/todo", url.Values{"kind": {"build-go-commit"}, "builder": {"linux-386"}}, nil, nil},
+
+ // repeat failure (shouldn't re-send mail)
+ {"/result", nil, &Result{Builder: "linux-386", Hash: "0003", OK: false, Log: "test"}, nil},
+
+ // non-Go repos
+ {"/commit", nil, tCommit("1001", "0000", testPkg, false), nil},
+ {"/commit", nil, tCommit("1002", "1001", testPkg, false), nil},
+ {"/commit", nil, tCommit("1003", "1002", testPkg, false), nil},
+ {"/todo", url.Values{"kind": {"build-package"}, "builder": {"linux-386"}, "packagePath": {testPkg}, "goHash": {"0001"}}, nil, &Todo{Kind: "build-package", Data: &Commit{Hash: "1003"}}},
+ {"/result", nil, &Result{PackagePath: testPkg, Builder: "linux-386", Hash: "1003", GoHash: "0001", OK: true}, nil},
+ {"/todo", url.Values{"kind": {"build-package"}, "builder": {"linux-386"}, "packagePath": {testPkg}, "goHash": {"0001"}}, nil, &Todo{Kind: "build-package", Data: &Commit{Hash: "1002"}}},
+ {"/result", nil, &Result{PackagePath: testPkg, Builder: "linux-386", Hash: "1002", GoHash: "0001", OK: true}, nil},
+ {"/todo", url.Values{"kind": {"build-package"}, "builder": {"linux-386"}, "packagePath": {testPkg}, "goHash": {"0001"}}, nil, &Todo{Kind: "build-package", Data: &Commit{Hash: "1001"}}},
+ {"/result", nil, &Result{PackagePath: testPkg, Builder: "linux-386", Hash: "1001", GoHash: "0001", OK: true}, nil},
+ {"/todo", url.Values{"kind": {"build-package"}, "builder": {"linux-386"}, "packagePath": {testPkg}, "goHash": {"0001"}}, nil, nil},
+ {"/todo", url.Values{"kind": {"build-package"}, "builder": {"linux-386"}, "packagePath": {testPkg}, "goHash": {"0002"}}, nil, &Todo{Kind: "build-package", Data: &Commit{Hash: "1003"}}},
+
+ // re-build Go revision for stale subrepos
+ {"/todo", url.Values{"kind": {"build-go-commit"}, "builder": {"linux-386"}}, nil, &Todo{Kind: "build-go-commit", Data: &Commit{Hash: "0005"}}},
+ {"/result", nil, &Result{PackagePath: testPkg, Builder: "linux-386", Hash: "1001", GoHash: "0005", OK: false, Log: "boo"}, nil},
+ {"/todo", url.Values{"kind": {"build-go-commit"}, "builder": {"linux-386"}}, nil, nil},
+
+ // benchmarks
+ // build-go-commit must have precedence over benchmark-go-commit
+ {"/todo", url.Values{"kind": {"build-go-commit", "benchmark-go-commit"}, "builder": {"linux-amd64"}}, nil, &Todo{Kind: "build-go-commit", Data: &Commit{Hash: "0005"}}},
+ // drain build-go-commit todo
+ {"/result", nil, &Result{Builder: "linux-amd64", Hash: "0005", OK: true}, nil},
+ {"/todo", url.Values{"kind": {"build-go-commit", "benchmark-go-commit"}, "builder": {"linux-amd64"}}, nil, &Todo{Kind: "build-go-commit", Data: &Commit{Hash: "0004"}}},
+ {"/result", nil, &Result{Builder: "linux-amd64", Hash: "0004", OK: true}, nil},
+ {"/todo", url.Values{"kind": {"build-go-commit", "benchmark-go-commit"}, "builder": {"linux-amd64"}}, nil, &Todo{Kind: "build-go-commit", Data: &Commit{Hash: "0002"}}},
+ {"/result", nil, &Result{Builder: "linux-amd64", Hash: "0002", OK: true}, nil},
+ // drain sub-repo todos
+ {"/result", nil, &Result{PackagePath: testPkg, Builder: "linux-amd64", Hash: "1001", GoHash: "0005", OK: false}, nil},
+ {"/result", nil, &Result{PackagePath: testPkg, Builder: "linux-amd64", Hash: "1002", GoHash: "0005", OK: false}, nil},
+ {"/result", nil, &Result{PackagePath: testPkg, Builder: "linux-amd64", Hash: "1003", GoHash: "0005", OK: false}, nil},
+ // now we must get benchmark todo
+ {"/todo", url.Values{"kind": {"build-go-commit", "benchmark-go-commit"}, "builder": {"linux-amd64"}}, nil, &Todo{Kind: "benchmark-go-commit", Data: &Commit{Hash: "0003", PerfResults: []string{}}}},
+ {"/perf-result", nil, &PerfRequest{Builder: "linux-amd64", Benchmark: "http", Hash: "0003", OK: true}, nil},
+ {"/todo", url.Values{"kind": {"build-go-commit", "benchmark-go-commit"}, "builder": {"linux-amd64"}}, nil, &Todo{Kind: "benchmark-go-commit", Data: &Commit{Hash: "0003", PerfResults: []string{"http"}}}},
+ {"/perf-result", nil, &PerfRequest{Builder: "linux-amd64", Benchmark: "json", Hash: "0003", OK: true}, nil},
+ {"/todo", url.Values{"kind": {"build-go-commit", "benchmark-go-commit"}, "builder": {"linux-amd64"}}, nil, &Todo{Kind: "benchmark-go-commit", Data: &Commit{Hash: "0003", PerfResults: []string{"http", "json"}}}},
+ {"/perf-result", nil, &PerfRequest{Builder: "linux-amd64", Benchmark: "meta-done", Hash: "0003", OK: true}, nil},
+ {"/todo", url.Values{"kind": {"build-go-commit", "benchmark-go-commit"}, "builder": {"linux-amd64"}}, nil, &Todo{Kind: "benchmark-go-commit", Data: &Commit{Hash: "0001", PerfResults: []string{}}}},
+ {"/perf-result", nil, &PerfRequest{Builder: "linux-amd64", Benchmark: "http", Hash: "0001", OK: true}, nil},
+ {"/perf-result", nil, &PerfRequest{Builder: "linux-amd64", Benchmark: "meta-done", Hash: "0001", OK: true}, nil},
+ {"/todo", url.Values{"kind": {"build-go-commit", "benchmark-go-commit"}, "builder": {"linux-amd64"}}, nil, nil},
+ // create new commit, it must appear in todo
+ {"/commit", nil, tCommit("0006", "0005", "", true), nil},
+ // drain build-go-commit todo
+ {"/todo", url.Values{"kind": {"build-go-commit", "benchmark-go-commit"}, "builder": {"linux-amd64"}}, nil, &Todo{Kind: "build-go-commit", Data: &Commit{Hash: "0006"}}},
+ {"/result", nil, &Result{Builder: "linux-amd64", Hash: "0006", OK: true}, nil},
+ {"/result", nil, &Result{PackagePath: testPkg, Builder: "linux-amd64", Hash: "1003", GoHash: "0006", OK: false}, nil},
+ {"/result", nil, &Result{PackagePath: testPkg, Builder: "linux-amd64", Hash: "1002", GoHash: "0006", OK: false}, nil},
+ {"/result", nil, &Result{PackagePath: testPkg, Builder: "linux-amd64", Hash: "1001", GoHash: "0006", OK: false}, nil},
+ // now we must get benchmark todo
+ {"/todo", url.Values{"kind": {"build-go-commit", "benchmark-go-commit"}, "builder": {"linux-amd64"}}, nil, &Todo{Kind: "benchmark-go-commit", Data: &Commit{Hash: "0006", PerfResults: []string{}}}},
+ {"/perf-result", nil, &PerfRequest{Builder: "linux-amd64", Benchmark: "http", Hash: "0006", OK: true}, nil},
+ {"/perf-result", nil, &PerfRequest{Builder: "linux-amd64", Benchmark: "meta-done", Hash: "0006", OK: true}, nil},
+ {"/todo", url.Values{"kind": {"build-go-commit", "benchmark-go-commit"}, "builder": {"linux-amd64"}}, nil, nil},
+ // create new benchmark, all commits must re-appear in todo
+ {"/commit", nil, tCommit("0007", "0006", "", true), nil},
+ // drain build-go-commit todo
+ {"/todo", url.Values{"kind": {"build-go-commit", "benchmark-go-commit"}, "builder": {"linux-amd64"}}, nil, &Todo{Kind: "build-go-commit", Data: &Commit{Hash: "0007"}}},
+ {"/result", nil, &Result{Builder: "linux-amd64", Hash: "0007", OK: true}, nil},
+ {"/result", nil, &Result{PackagePath: testPkg, Builder: "linux-amd64", Hash: "1003", GoHash: "0007", OK: false}, nil},
+ {"/result", nil, &Result{PackagePath: testPkg, Builder: "linux-amd64", Hash: "1002", GoHash: "0007", OK: false}, nil},
+ {"/result", nil, &Result{PackagePath: testPkg, Builder: "linux-amd64", Hash: "1001", GoHash: "0007", OK: false}, nil},
+ // now we must get benchmark todo
+ {"/todo", url.Values{"kind": {"build-go-commit", "benchmark-go-commit"}, "builder": {"linux-amd64"}}, nil, &Todo{Kind: "benchmark-go-commit", Data: &Commit{Hash: "0007", PerfResults: []string{}}}},
+ {"/perf-result", nil, &PerfRequest{Builder: "linux-amd64", Benchmark: "bson", Hash: "0007", OK: true}, nil},
+ {"/perf-result", nil, &PerfRequest{Builder: "linux-amd64", Benchmark: "meta-done", Hash: "0007", OK: true}, nil},
+ {"/todo", url.Values{"kind": {"build-go-commit", "benchmark-go-commit"}, "builder": {"linux-amd64"}}, nil, &Todo{Kind: "benchmark-go-commit", Data: &Commit{Hash: "0007", PerfResults: []string{"bson"}}}},
+ {"/perf-result", nil, &PerfRequest{Builder: "linux-amd64", Benchmark: "meta-done", Hash: "0007", OK: true}, nil},
+ {"/todo", url.Values{"kind": {"build-go-commit", "benchmark-go-commit"}, "builder": {"linux-amd64"}}, nil, &Todo{Kind: "benchmark-go-commit", Data: &Commit{Hash: "0006", PerfResults: []string{"http"}}}},
+ {"/perf-result", nil, &PerfRequest{Builder: "linux-amd64", Benchmark: "meta-done", Hash: "0006", OK: true}, nil},
+ {"/todo", url.Values{"kind": {"build-go-commit", "benchmark-go-commit"}, "builder": {"linux-amd64"}}, nil, &Todo{Kind: "benchmark-go-commit", Data: &Commit{Hash: "0001", PerfResults: []string{"http"}}}},
+ {"/perf-result", nil, &PerfRequest{Builder: "linux-amd64", Benchmark: "meta-done", Hash: "0001", OK: true}, nil},
+ {"/todo", url.Values{"kind": {"build-go-commit", "benchmark-go-commit"}, "builder": {"linux-amd64"}}, nil, &Todo{Kind: "benchmark-go-commit", Data: &Commit{Hash: "0003", PerfResults: []string{"http", "json"}}}},
+ {"/perf-result", nil, &PerfRequest{Builder: "linux-amd64", Benchmark: "meta-done", Hash: "0003", OK: true}, nil},
+ {"/todo", url.Values{"kind": {"build-go-commit", "benchmark-go-commit"}, "builder": {"linux-amd64"}}, nil, nil},
+ // attach second builder
+ {"/todo", url.Values{"kind": {"build-go-commit", "benchmark-go-commit"}, "builder": {"linux-386"}}, nil, &Todo{Kind: "build-go-commit", Data: &Commit{Hash: "0007"}}},
+ // drain build-go-commit todo
+ {"/result", nil, &Result{Builder: "linux-386", Hash: "0007", OK: true}, nil},
+ {"/result", nil, &Result{Builder: "linux-386", Hash: "0006", OK: true}, nil},
+ {"/result", nil, &Result{PackagePath: testPkg, Builder: "linux-386", Hash: "1003", GoHash: "0007", OK: false}, nil},
+ {"/result", nil, &Result{PackagePath: testPkg, Builder: "linux-386", Hash: "1002", GoHash: "0007", OK: false}, nil},
+ {"/result", nil, &Result{PackagePath: testPkg, Builder: "linux-386", Hash: "1001", GoHash: "0007", OK: false}, nil},
+ // now we must get benchmark todo
+ {"/todo", url.Values{"kind": {"build-go-commit", "benchmark-go-commit"}, "builder": {"linux-386"}}, nil, &Todo{Kind: "benchmark-go-commit", Data: &Commit{Hash: "0007"}}},
+ {"/perf-result", nil, &PerfRequest{Builder: "linux-386", Benchmark: "meta-done", Hash: "0007", OK: true}, nil},
+ {"/todo", url.Values{"kind": {"build-go-commit", "benchmark-go-commit"}, "builder": {"linux-386"}}, nil, &Todo{Kind: "benchmark-go-commit", Data: &Commit{Hash: "0006"}}},
+ {"/perf-result", nil, &PerfRequest{Builder: "linux-386", Benchmark: "meta-done", Hash: "0006", OK: true}, nil},
+ {"/todo", url.Values{"kind": {"build-go-commit", "benchmark-go-commit"}, "builder": {"linux-386"}}, nil, &Todo{Kind: "benchmark-go-commit", Data: &Commit{Hash: "0001"}}},
+ {"/perf-result", nil, &PerfRequest{Builder: "linux-386", Benchmark: "meta-done", Hash: "0001", OK: true}, nil},
+ {"/todo", url.Values{"kind": {"build-go-commit", "benchmark-go-commit"}, "builder": {"linux-386"}}, nil, &Todo{Kind: "benchmark-go-commit", Data: &Commit{Hash: "0003"}}},
+ {"/perf-result", nil, &PerfRequest{Builder: "linux-386", Benchmark: "meta-done", Hash: "0003", OK: true}, nil},
+ {"/todo", url.Values{"kind": {"build-go-commit", "benchmark-go-commit"}, "builder": {"linux-386"}}, nil, nil},
+}
+
+func testHandler(w http.ResponseWriter, r *http.Request) {
+ if !appengine.IsDevAppServer() {
+ fmt.Fprint(w, "These tests must be run under the dev_appserver.")
+ return
+ }
+ c := appengine.NewContext(r)
+ if err := nukeEntities(c, testEntityKinds); err != nil {
+ logErr(w, r, err)
+ return
+ }
+ if r.FormValue("nukeonly") != "" {
+ fmt.Fprint(w, "OK")
+ return
+ }
+
+ for _, p := range testPackages {
+ if _, err := datastore.Put(c, p.Key(c), p); err != nil {
+ logErr(w, r, err)
+ return
+ }
+ }
+
+ origReq := *r
+ defer func() {
+ // HACK: We need to clobber the original request (see below)
+ // so make sure we fix it before exiting the handler.
+ *r = origReq
+ }()
+ for i, t := range testRequests {
+ c.Infof("running test %d %s vals='%q' req='%q' res='%q'", i, t.path, t.vals, t.req, t.res)
+ errorf := func(format string, args ...interface{}) {
+ fmt.Fprintf(w, "%d %s: ", i, t.path)
+ fmt.Fprintf(w, format, args...)
+ fmt.Fprintln(w)
+ }
+ var body io.ReadWriter
+ if t.req != nil {
+ body = new(bytes.Buffer)
+ json.NewEncoder(body).Encode(t.req)
+ }
+ url := "http://" + domain + t.path
+ if t.vals != nil {
+ url += "?" + t.vals.Encode() + "&version=2"
+ } else {
+ url += "?version=2"
+ }
+ req, err := http.NewRequest("POST", url, body)
+ if err != nil {
+ logErr(w, r, err)
+ return
+ }
+ if t.req != nil {
+ req.Method = "POST"
+ }
+ req.Header = origReq.Header
+ rec := httptest.NewRecorder()
+
+ // Make the request
+ *r = *req // HACK: App Engine uses the request pointer
+ // as a map key to resolve Contexts.
+ http.DefaultServeMux.ServeHTTP(rec, r)
+
+ if rec.Code != 0 && rec.Code != 200 {
+ errorf(rec.Body.String())
+ return
+ }
+ c.Infof("response='%v'", rec.Body.String())
+ resp := new(dashResponse)
+
+ // If we're expecting a *Todo value,
+ // prime the Response field with a Todo and a Commit inside it.
+ if t.path == "/todo" {
+ resp.Response = &Todo{Data: &Commit{}}
+ }
+
+ if strings.HasPrefix(t.path, "/log/") {
+ resp.Response = rec.Body.String()
+ } else {
+ err := json.NewDecoder(rec.Body).Decode(resp)
+ if err != nil {
+ errorf("decoding response: %v", err)
+ return
+ }
+ }
+ if e, ok := t.res.(string); ok {
+ g, ok := resp.Response.(string)
+ if !ok {
+ errorf("Response not string: %T", resp.Response)
+ return
+ }
+ if g != e {
+ errorf("response mismatch: got %q want %q", g, e)
+ return
+ }
+ }
+ if e, ok := t.res.(*Todo); ok {
+ g, ok := resp.Response.(*Todo)
+ if !ok {
+ errorf("Response not *Todo: %T", resp.Response)
+ return
+ }
+ if e.Data == nil && g.Data != nil {
+ errorf("Response.Data should be nil, got: %v", g.Data)
+ return
+ }
+ if g.Data == nil {
+ errorf("Response.Data is nil, want: %v", e.Data)
+ return
+ }
+ gd, ok := g.Data.(*Commit)
+ if !ok {
+ errorf("Response.Data not *Commit: %T", g.Data)
+ return
+ }
+ if g.Kind != e.Kind {
+ errorf("kind don't match: got %q, want %q", g.Kind, e.Kind)
+ return
+ }
+ ed := e.Data.(*Commit)
+ if ed.Hash != gd.Hash {
+ errorf("hashes don't match: got %q, want %q", gd.Hash, ed.Hash)
+ return
+ }
+ if len(gd.PerfResults) != len(ed.PerfResults) {
+ errorf("result data len don't match: got %v, want %v", len(gd.PerfResults), len(ed.PerfResults))
+ return
+ }
+ for i := range gd.PerfResults {
+ if gd.PerfResults[i] != ed.PerfResults[i] {
+ errorf("result data %v don't match: got %v, want %v", i, gd.PerfResults[i], ed.PerfResults[i])
+ return
+ }
+ }
+ }
+ if t.res == nil && resp.Response != nil {
+ errorf("response mismatch: got %q expected ", resp.Response)
+ return
+ }
+ }
+ fmt.Fprint(w, "PASS\nYou should see only one mail notification (for 0003/linux-386) in the dev_appserver logs.")
+}
+
+func nukeEntities(c appengine.Context, kinds []string) error {
+ if !appengine.IsDevAppServer() {
+ return errors.New("can't nuke production data")
+ }
+ var keys []*datastore.Key
+ for _, kind := range kinds {
+ q := datastore.NewQuery(kind).KeysOnly()
+ for t := q.Run(c); ; {
+ k, err := t.Next(nil)
+ if err == datastore.Done {
+ break
+ }
+ if err != nil {
+ return err
+ }
+ keys = append(keys, k)
+ }
+ }
+ return datastore.DeleteMulti(c, keys)
+}
diff --git a/llgo/third_party/go.tools/dashboard/app/build/ui.go b/llgo/third_party/go.tools/dashboard/app/build/ui.go
new file mode 100644
index 0000000000000000000000000000000000000000..c2cf7c5a2f39e5fd0e23292ef19ac420337e94e4
--- /dev/null
+++ b/llgo/third_party/go.tools/dashboard/app/build/ui.go
@@ -0,0 +1,460 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// TODO(adg): packages at weekly/release
+// TODO(adg): some means to register new packages
+
+// +build appengine
+
+package build
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "html/template"
+ "net/http"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+
+ "cache"
+
+ "appengine"
+ "appengine/datastore"
+)
+
+func init() {
+ for _, d := range dashboards {
+ http.HandleFunc(d.RelPath, uiHandler)
+ }
+}
+
+// uiHandler draws the build status page.
+func uiHandler(w http.ResponseWriter, r *http.Request) {
+ d := dashboardForRequest(r)
+ c := d.Context(appengine.NewContext(r))
+ now := cache.Now(c)
+ key := "build-ui"
+
+ page, _ := strconv.Atoi(r.FormValue("page"))
+ if page < 0 {
+ page = 0
+ }
+ key += fmt.Sprintf("-page%v", page)
+
+ branch := r.FormValue("branch")
+ if branch != "" {
+ key += "-branch-" + branch
+ }
+
+ repo := r.FormValue("repo")
+ if repo != "" {
+ key += "-repo-" + repo
+ }
+
+ var b []byte
+ if cache.Get(r, now, key, &b) {
+ w.Write(b)
+ return
+ }
+
+ pkg := &Package{} // empty package is the main repository
+ if repo != "" {
+ var err error
+ pkg, err = GetPackage(c, repo)
+ if err != nil {
+ logErr(w, r, err)
+ return
+ }
+ }
+ commits, err := dashCommits(c, pkg, page, branch)
+ if err != nil {
+ logErr(w, r, err)
+ return
+ }
+ builders := commitBuilders(commits)
+
+ var tipState *TagState
+ if pkg.Kind == "" && page == 0 && (branch == "" || branch == "default") {
+ // only show sub-repo state on first page of normal repo view
+ tipState, err = TagStateByName(c, "tip")
+ if err != nil {
+ logErr(w, r, err)
+ return
+ }
+ }
+
+ p := &Pagination{}
+ if len(commits) == commitsPerPage {
+ p.Next = page + 1
+ }
+ if page > 0 {
+ p.Prev = page - 1
+ p.HasPrev = true
+ }
+ data := &uiTemplateData{d, pkg, commits, builders, tipState, p, branch}
+
+ var buf bytes.Buffer
+ if err := uiTemplate.Execute(&buf, data); err != nil {
+ logErr(w, r, err)
+ return
+ }
+
+ cache.Set(r, now, key, buf.Bytes())
+
+ buf.WriteTo(w)
+}
+
+type Pagination struct {
+ Next, Prev int
+ HasPrev bool
+}
+
+// dashCommits gets a slice of the latest Commits to the current dashboard.
+// If page > 0 it paginates by commitsPerPage.
+func dashCommits(c appengine.Context, pkg *Package, page int, branch string) ([]*Commit, error) {
+ offset := page * commitsPerPage
+ q := datastore.NewQuery("Commit").
+ Ancestor(pkg.Key(c)).
+ Order("-Num")
+
+ var commits []*Commit
+ if branch == "" {
+ _, err := q.Limit(commitsPerPage).Offset(offset).
+ GetAll(c, &commits)
+ return commits, err
+ }
+
+ // Look for commits on a specific branch.
+ for t, n := q.Run(c), 0; len(commits) < commitsPerPage && n < 1000; {
+ var c Commit
+ _, err := t.Next(&c)
+ if err == datastore.Done {
+ break
+ }
+ if err != nil {
+ return nil, err
+ }
+ if !isBranchCommit(&c, branch) {
+ continue
+ }
+ if n >= offset {
+ commits = append(commits, &c)
+ }
+ n++
+ }
+ return commits, nil
+}
+
+// isBranchCommit reports whether the given commit is on the specified branch.
+// It does so by examining the commit description, so there will be some bad
+// matches where the branch commits do not begin with the "[branch]" prefix.
+func isBranchCommit(c *Commit, b string) bool {
+ d := strings.TrimSpace(c.Desc)
+ if b == "default" {
+ return !strings.HasPrefix(d, "[")
+ }
+ return strings.HasPrefix(d, "["+b+"]")
+}
+
+// commitBuilders returns the names of the builders that provided
+// Results for the provided commits.
+func commitBuilders(commits []*Commit) []string {
+ builders := make(map[string]bool)
+ for _, commit := range commits {
+ for _, r := range commit.Results() {
+ builders[r.Builder] = true
+ }
+ }
+ k := keys(builders)
+ sort.Sort(builderOrder(k))
+ return k
+}
+
+func keys(m map[string]bool) (s []string) {
+ for k := range m {
+ s = append(s, k)
+ }
+ sort.Strings(s)
+ return
+}
+
+// builderOrder implements sort.Interface, sorting builder names
+// ("darwin-amd64", etc) first by builderPriority and then alphabetically.
+type builderOrder []string
+
+func (s builderOrder) Len() int { return len(s) }
+func (s builderOrder) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s builderOrder) Less(i, j int) bool {
+ pi, pj := builderPriority(s[i]), builderPriority(s[j])
+ if pi == pj {
+ return s[i] < s[j]
+ }
+ return pi < pj
+}
+
+func builderPriority(builder string) (p int) {
+ // Put -temp builders at the end, always.
+ if strings.HasSuffix(builder, "-temp") {
+ defer func() { p += 20 }()
+ }
+ // Group race builders together.
+ if isRace(builder) {
+ return 1
+ }
+ // If the OS has a specified priority, use it.
+ if p, ok := osPriority[builderOS(builder)]; ok {
+ return p
+ }
+ // The rest.
+ return 10
+}
+
+func isRace(s string) bool {
+ return strings.Contains(s, "-race-") || strings.HasSuffix(s, "-race")
+}
+
+func unsupported(builder string) bool {
+ if strings.HasSuffix(builder, "-temp") {
+ return true
+ }
+ return unsupportedOS(builderOS(builder))
+}
+
+func unsupportedOS(os string) bool {
+ if os == "race" {
+ return false
+ }
+ p, ok := osPriority[os]
+ return !ok || p > 0
+}
+
+// Priorities for specific operating systems.
+var osPriority = map[string]int{
+ "darwin": 0,
+ "freebsd": 0,
+ "linux": 0,
+ "windows": 0,
+ // race == 1
+ "openbsd": 2,
+ "netbsd": 3,
+ "dragonfly": 4,
+}
+
+// TagState represents the state of all Packages at a Tag.
+type TagState struct {
+ Tag *Commit
+ Packages []*PackageState
+}
+
+// PackageState represents the state of a Package at a Tag.
+type PackageState struct {
+ Package *Package
+ Commit *Commit
+}
+
+// TagStateByName fetches the results for all Go subrepos at the specified Tag.
+func TagStateByName(c appengine.Context, name string) (*TagState, error) {
+ tag, err := GetTag(c, name)
+ if err != nil {
+ return nil, err
+ }
+ pkgs, err := Packages(c, "subrepo")
+ if err != nil {
+ return nil, err
+ }
+ var st TagState
+ for _, pkg := range pkgs {
+ com, err := pkg.LastCommit(c)
+ if err != nil {
+ c.Warningf("%v: no Commit found: %v", pkg, err)
+ continue
+ }
+ st.Packages = append(st.Packages, &PackageState{pkg, com})
+ }
+ st.Tag, err = tag.Commit(c)
+ if err != nil {
+ return nil, err
+ }
+ return &st, nil
+}
+
+type uiTemplateData struct {
+ Dashboard *Dashboard
+ Package *Package
+ Commits []*Commit
+ Builders []string
+ TipState *TagState
+ Pagination *Pagination
+ Branch string
+}
+
+var uiTemplate = template.Must(
+ template.New("ui.html").Funcs(tmplFuncs).ParseFiles("build/ui.html"),
+)
+
+var tmplFuncs = template.FuncMap{
+ "buildDashboards": buildDashboards,
+ "builderOS": builderOS,
+ "builderSpans": builderSpans,
+ "builderSubheading": builderSubheading,
+ "builderTitle": builderTitle,
+ "repoURL": repoURL,
+ "shortDesc": shortDesc,
+ "shortHash": shortHash,
+ "shortUser": shortUser,
+ "tail": tail,
+ "unsupported": unsupported,
+}
+
+func splitDash(s string) (string, string) {
+ i := strings.Index(s, "-")
+ if i >= 0 {
+ return s[:i], s[i+1:]
+ }
+ return s, ""
+}
+
+// builderOS returns the os tag for a builder string
+func builderOS(s string) string {
+ os, _ := splitDash(s)
+ return os
+}
+
+// builderOSOrRace returns the builder OS or, if it is a race builder, "race".
+func builderOSOrRace(s string) string {
+ if isRace(s) {
+ return "race"
+ }
+ return builderOS(s)
+}
+
+// builderArch returns the arch tag for a builder string
+func builderArch(s string) string {
+ _, arch := splitDash(s)
+ arch, _ = splitDash(arch) // chop third part
+ return arch
+}
+
+// builderSubheading returns a short arch tag for a builder string
+// or, if it is a race builder, the builder OS.
+func builderSubheading(s string) string {
+ if isRace(s) {
+ return builderOS(s)
+ }
+ arch := builderArch(s)
+ switch arch {
+ case "amd64":
+ return "x64"
+ }
+ return arch
+}
+
+// builderArchChar returns the architecture letter for a builder string
+func builderArchChar(s string) string {
+ arch := builderArch(s)
+ switch arch {
+ case "386":
+ return "8"
+ case "amd64":
+ return "6"
+ case "arm":
+ return "5"
+ }
+ return arch
+}
+
+type builderSpan struct {
+ N int
+ OS string
+ Unsupported bool
+}
+
+// builderSpans creates a list of tags showing
+// the builder's operating system names, spanning
+// the appropriate number of columns.
+func builderSpans(s []string) []builderSpan {
+ var sp []builderSpan
+ for len(s) > 0 {
+ i := 1
+ os := builderOSOrRace(s[0])
+ u := unsupportedOS(os) || strings.HasSuffix(s[0], "-temp")
+ for i < len(s) && builderOSOrRace(s[i]) == os {
+ i++
+ }
+ sp = append(sp, builderSpan{i, os, u})
+ s = s[i:]
+ }
+ return sp
+}
+
+// builderTitle formats "linux-amd64-foo" as "linux amd64 foo".
+func builderTitle(s string) string {
+ return strings.Replace(s, "-", " ", -1)
+}
+
+// buildDashboards returns the known public dashboards.
+func buildDashboards() []*Dashboard {
+ return dashboards
+}
+
+// shortDesc returns the first line of a description.
+func shortDesc(desc string) string {
+ if i := strings.Index(desc, "\n"); i != -1 {
+ desc = desc[:i]
+ }
+ return limitStringLength(desc, 100)
+}
+
+// shortHash returns a short version of a hash.
+func shortHash(hash string) string {
+ if len(hash) > 12 {
+ hash = hash[:12]
+ }
+ return hash
+}
+
+// shortUser returns a shortened version of a user string.
+func shortUser(user string) string {
+ if i, j := strings.Index(user, "<"), strings.Index(user, ">"); 0 <= i && i < j {
+ user = user[i+1 : j]
+ }
+ if i := strings.Index(user, "@"); i >= 0 {
+ return user[:i]
+ }
+ return user
+}
+
+// repoRe matches Google Code repositories and subrepositories (without paths).
+var repoRe = regexp.MustCompile(`^code\.google\.com/p/([a-z0-9\-]+)(\.[a-z0-9\-]+)?$`)
+
+// repoURL returns the URL of a change at a Google Code repository or subrepo.
+func repoURL(dashboard, hash, packagePath string) (string, error) {
+ if packagePath == "" {
+ if dashboard == "Gccgo" {
+ return "https://code.google.com/p/gofrontend/source/detail?r=" + hash, nil
+ }
+ return "https://code.google.com/p/go/source/detail?r=" + hash, nil
+ }
+ m := repoRe.FindStringSubmatch(packagePath)
+ if m == nil {
+ return "", errors.New("unrecognized package: " + packagePath)
+ }
+ url := "https://code.google.com/p/" + m[1] + "/source/detail?r=" + hash
+ if len(m) > 2 {
+ url += "&repo=" + m[2][1:]
+ }
+ return url, nil
+}
+
+// tail returns the trailing n lines of s.
+func tail(n int, s string) string {
+ lines := strings.Split(s, "\n")
+ if len(lines) < n {
+ return s
+ }
+ return strings.Join(lines[len(lines)-n:], "\n")
+}
diff --git a/llgo/third_party/go.tools/dashboard/app/build/ui.html b/llgo/third_party/go.tools/dashboard/app/build/ui.html
new file mode 100644
index 0000000000000000000000000000000000000000..6ae268c664e7577126cf6b39f739aea8a5255fd7
--- /dev/null
+++ b/llgo/third_party/go.tools/dashboard/app/build/ui.html
@@ -0,0 +1,210 @@
+
+
+
+ {{$.Dashboard.Name}} Build Dashboard
+
+
+
+
+
+
+
+
+
+ {{range buildDashboards}}
+ {{.Name}}
+ {{end}}
+
+
+ show only first-class ports
+
+
+ {{with $.Package.Name}}{{.}} {{end}}
+
+
+
+ {{if $.Commits}}
+
+
+
+ {{range $.Builders | builderSpans}}
+
+ {{end}}
+
+
+
+
+
+
+
+ {{if $.Package.Path}}
+ revision
+ {{else}}
+
+ {{end}}
+ {{range $.Builders | builderSpans}}
+ {{.OS}}
+ {{end}}
+
+
+
+
+
+ {{if $.Package.Path}}
+ repo
+ {{$.Dashboard.Name}}
+ {{else}}
+
+ {{end}}
+ {{range $.Builders}}
+ {{builderSubheading .}}
+ {{end}}
+
+
+
+
+ {{range $c := $.Commits}}
+ {{range $i, $h := $c.ResultGoHashes}}
+
+ {{if $i}}
+
+ {{else}}
+ {{shortHash $c.Hash}}
+ {{end}}
+ {{if $h}}
+ {{shortHash $h}}
+ {{end}}
+ {{range $.Builders}}
+
+ {{with $c.Result . $h}}
+ {{if .OK}}
+ ok
+ {{else}}
+ fail
+ {{end}}
+ {{else}}
+
+ {{end}}
+
+ {{end}}
+ {{if $i}}
+
+
+
+ {{else}}
+ {{shortUser $c.User}}
+ {{$c.Time.Format "Mon 02 Jan 15:04"}}
+ {{shortDesc $c.Desc}}
+ {{end}}
+
+ {{end}}
+ {{end}}
+
+
+ {{with $.Pagination}}
+
+ {{end}}
+
+ {{else}}
+
No commits to display. Hm.
+ {{end}}
+
+ {{with $.TipState}}
+ {{$goHash := .Tag.Hash}}
+ {{if .Packages}}
+
+
+
+
+
+ {{range $.Builders | builderSpans}}
+
+ {{end}}
+
+
+
+
+
+
+
+
+
+ {{range $.Builders | builderSpans}}
+ {{.OS}}
+ {{end}}
+
+
+
+
+
+
+
+ {{range $.Builders}}
+ {{builderSubheading .}}
+ {{end}}
+
+
+
+
+ {{range $pkg := .Packages}}
+
+ {{.Package.Name}}
+
+ {{$h := $pkg.Commit.Hash}}
+ {{shortHash $h}}
+
+ {{range $.Builders}}
+
+ {{with $pkg.Commit.Result . $goHash}}
+ {{if .OK}}
+ ok
+ {{else}}
+ fail
+ {{end}}
+ {{else}}
+
+ {{end}}
+
+ {{end}}
+ {{with $pkg.Commit}}
+ {{shortUser .User}}
+ {{.Time.Format "Mon 02 Jan 15:04"}}
+ {{shortDesc .Desc}}
+ {{end}}
+
+ {{end}}
+
+ {{end}}
+ {{end}}
+
+
+
+
diff --git a/llgo/third_party/go.tools/dashboard/app/build/update.go b/llgo/third_party/go.tools/dashboard/app/build/update.go
new file mode 100644
index 0000000000000000000000000000000000000000..1d22cc9f13afa9ecedecd2a40670e7f2959b1752
--- /dev/null
+++ b/llgo/third_party/go.tools/dashboard/app/build/update.go
@@ -0,0 +1,117 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build appengine
+
+package build
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+
+ "appengine"
+ "appengine/datastore"
+)
+
+func init() {
+ http.HandleFunc("/updatebenchmark", updateBenchmark)
+}
+
+func updateBenchmark(w http.ResponseWriter, r *http.Request) {
+ if !appengine.IsDevAppServer() {
+ fmt.Fprint(w, "Update must not run on real server.")
+ return
+ }
+
+ if r.Method != "POST" {
+ fmt.Fprintf(w, "bad request method")
+ return
+ }
+
+ c := contextForRequest(r)
+ if !validKey(c, r.FormValue("key"), r.FormValue("builder")) {
+ fmt.Fprintf(w, "bad builder/key")
+ return
+ }
+
+ defer r.Body.Close()
+ var hashes []string
+ if err := json.NewDecoder(r.Body).Decode(&hashes); err != nil {
+ fmt.Fprintf(w, "failed to decode request: %v", err)
+ return
+ }
+
+ ncommit := 0
+ nrun := 0
+ tx := func(c appengine.Context) error {
+ var cr *CommitRun
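+ // cr accumulates the CommitRun covering the current window of PerfRunLength commits; it is written out whenever the loop crosses into a new window and once more after the loop.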
+ for _, hash := range hashes {
+ // Update Commit.
+ com := &Commit{Hash: hash}
+ err := datastore.Get(c, com.Key(c), com)
+ if err != nil && err != datastore.ErrNoSuchEntity {
+ return fmt.Errorf("fetching Commit: %v", err)
+ }
+ if err == datastore.ErrNoSuchEntity {
+ continue
+ }
+ com.NeedsBenchmarking = true
+ com.PerfResults = nil
+ if err := putCommit(c, com); err != nil {
+ return err
+ }
+ ncommit++
+
+ // create PerfResult
+ res := &PerfResult{CommitHash: com.Hash, CommitNum: com.Num}
+ err = datastore.Get(c, res.Key(c), res)
+ if err != nil && err != datastore.ErrNoSuchEntity {
+ return fmt.Errorf("fetching PerfResult: %v", err)
+ }
+ if err == datastore.ErrNoSuchEntity {
+ if _, err := datastore.Put(c, res.Key(c), res); err != nil {
+ return fmt.Errorf("putting PerfResult: %v", err)
+ }
+ }
+
+ // Update CommitRun.
+ if cr != nil && cr.StartCommitNum != com.Num/PerfRunLength*PerfRunLength {
+ if _, err := datastore.Put(c, cr.Key(c), cr); err != nil {
+ return fmt.Errorf("putting CommitRun: %v", err)
+ }
+ nrun++
+ cr = nil
+ }
+ if cr == nil {
+ var err error
+ cr, err = GetCommitRun(c, com.Num)
+ if err != nil {
+ return fmt.Errorf("getting CommitRun: %v", err)
+ }
+ }
+ if com.Num < cr.StartCommitNum || com.Num >= cr.StartCommitNum+PerfRunLength {
+ return fmt.Errorf("commit num %v out of range [%v, %v)", com.Num, cr.StartCommitNum, cr.StartCommitNum+PerfRunLength)
+ }
+ idx := com.Num - cr.StartCommitNum
+ cr.Hash[idx] = com.Hash
+ cr.User[idx] = shortDesc(com.User)
+ cr.Desc[idx] = shortDesc(com.Desc)
+ cr.Time[idx] = com.Time
+ cr.NeedsBenchmarking[idx] = com.NeedsBenchmarking
+ }
+ if cr != nil {
+ if _, err := datastore.Put(c, cr.Key(c), cr); err != nil {
+ return fmt.Errorf("putting CommitRun: %v", err)
+ }
+ nrun++
+ }
+ return nil
+ }
+ if err := datastore.RunInTransaction(c, tx, nil); err != nil {
+ fmt.Fprintf(w, "failed to execute tx: %v", err)
+ return
+ }
+ fmt.Fprintf(w, "OK (updated %v commits and %v commit runs)", ncommit, nrun)
+}
diff --git a/llgo/third_party/go.tools/dashboard/app/cache/cache.go b/llgo/third_party/go.tools/dashboard/app/cache/cache.go
new file mode 100644
index 0000000000000000000000000000000000000000..4b57614e1a9859fe6c246726e4fdad7114db4f38
--- /dev/null
+++ b/llgo/third_party/go.tools/dashboard/app/cache/cache.go
@@ -0,0 +1,86 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build appengine
+
+package cache
+
+import (
+ "fmt"
+ "net/http"
+ "time"
+
+ "appengine"
+ "appengine/memcache"
+)
+
+// TimeKey specifies the memcache entity that keeps the logical datastore time.
+var TimeKey = "cachetime"
+
+const (
+ nocache = "nocache"
+ expiry = 600 // 10 minutes
+)
+
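+// newTime returns a fresh base value for the logical clock: the current Unix time shifted into the high 32 bits, leaving the low bits free for Tick's increments.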
+func newTime() uint64 { return uint64(time.Now().Unix()) << 32 }
+
+// Now returns the current logical datastore time to use for cache lookups.
+func Now(c appengine.Context) uint64 {
+ t, err := memcache.Increment(c, TimeKey, 0, newTime())
+ if err != nil {
+ c.Errorf("cache.Now: %v", err)
+ return 0
+ }
+ return t
+}
+
+// Tick sets the current logical datastore time to a never-before-used time
+// and returns that time. It should be called to invalidate the cache.
+func Tick(c appengine.Context) uint64 {
+ t, err := memcache.Increment(c, TimeKey, 1, newTime())
+ if err != nil {
+ c.Errorf("cache.Tick: %v", err)
+ return 0
+ }
+ return t
+}
+
+// Get fetches data for name at time now from memcache and unmarshals it into
+// value. It reports whether it found the cache record and logs any errors to
+// the admin console.
+func Get(r *http.Request, now uint64, name string, value interface{}) bool {
+ if now == 0 || r.FormValue(nocache) != "" {
+ return false
+ }
+ c := appengine.NewContext(r)
+ key := fmt.Sprintf("%s.%d", name, now)
+ _, err := memcache.JSON.Get(c, key, value)
+ if err == nil {
+ c.Debugf("cache hit %q", key)
+ return true
+ }
+ c.Debugf("cache miss %q", key)
+ if err != memcache.ErrCacheMiss {
+ c.Errorf("get cache %q: %v", key, err)
+ }
+ return false
+}
+
+// Set puts value into memcache under name at time now.
+// It logs any errors to the admin console.
+func Set(r *http.Request, now uint64, name string, value interface{}) {
+ if now == 0 || r.FormValue(nocache) != "" {
+ return
+ }
+ c := appengine.NewContext(r)
+ key := fmt.Sprintf("%s.%d", name, now)
+ err := memcache.JSON.Set(c, &memcache.Item{
+ Key: key,
+ Object: value,
+ Expiration: expiry,
+ })
+ if err != nil {
+ c.Errorf("set cache %q: %v", key, err)
+ }
+}
diff --git a/llgo/third_party/go.tools/dashboard/app/cron.yaml b/llgo/third_party/go.tools/dashboard/app/cron.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..4118f9eb61848414aa0e16e2bb371bede514b008
--- /dev/null
+++ b/llgo/third_party/go.tools/dashboard/app/cron.yaml
@@ -0,0 +1,5 @@
+cron:
+- description: updates noise level for benchmarking results
+ url: /perflearn?update=1
+ schedule: every 24 hours
+
diff --git a/llgo/third_party/go.tools/dashboard/app/index.yaml b/llgo/third_party/go.tools/dashboard/app/index.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..670a667f5baecebf9bd42aa800961c103aa49961
--- /dev/null
+++ b/llgo/third_party/go.tools/dashboard/app/index.yaml
@@ -0,0 +1,54 @@
+indexes:
+
+- kind: Commit
+ ancestor: yes
+ properties:
+ - name: Num
+ direction: desc
+
+- kind: Commit
+ ancestor: yes
+ properties:
+ - name: Time
+ direction: desc
+
+- kind: Commit
+ ancestor: yes
+ properties:
+ - name: NeedsBenchmarking
+ - name: Num
+ direction: desc
+
+- kind: CommitRun
+ ancestor: yes
+ properties:
+ - name: StartCommitNum
+ direction: desc
+
+- kind: PerfResult
+ ancestor: yes
+ properties:
+ - name: CommitNum
+ direction: desc
+
+- kind: PerfResult
+ ancestor: yes
+ properties:
+ - name: CommitNum
+ direction: asc
+
+- kind: CommitRun
+ ancestor: yes
+ properties:
+ - name: StartCommitNum
+ direction: asc
+
+- kind: PerfMetricRun
+ ancestor: yes
+ properties:
+ - name: Builder
+ - name: Benchmark
+ - name: Metric
+ - name: StartCommitNum
+ direction: asc
+
diff --git a/llgo/third_party/go.tools/dashboard/app/key/key.go b/llgo/third_party/go.tools/dashboard/app/key/key.go
new file mode 100644
index 0000000000000000000000000000000000000000..e52554f100d4e0fb45a1ab73bc7ee1e3a1de485e
--- /dev/null
+++ b/llgo/third_party/go.tools/dashboard/app/key/key.go
@@ -0,0 +1,64 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build appengine
+
+package key
+
+import (
+ "sync"
+
+ "appengine"
+ "appengine/datastore"
+)
+
+var theKey struct {
+ sync.RWMutex
+ builderKey
+}
+
+type builderKey struct {
+ Secret string
+}
+
+func (k *builderKey) Key(c appengine.Context) *datastore.Key {
+ return datastore.NewKey(c, "BuilderKey", "root", 0, nil)
+}
+
+func Secret(c appengine.Context) string {
+ // check with rlock
+ theKey.RLock()
+ k := theKey.Secret
+ theKey.RUnlock()
+ if k != "" {
+ return k
+ }
+
+ // prepare to fill; check with lock and keep lock
+ theKey.Lock()
+ defer theKey.Unlock()
+ if theKey.Secret != "" {
+ return theKey.Secret
+ }
+
+ // fill
+ if err := datastore.Get(c, theKey.Key(c), &theKey.builderKey); err != nil {
+ if err == datastore.ErrNoSuchEntity {
+ // If the key is not stored in datastore, write it.
+ // This only happens at the beginning of a new deployment.
+ // The code is left here for SDK use and in case a fresh
+ // deployment is ever needed. "gophers rule" is not the
+ // real key.
+ if !appengine.IsDevAppServer() {
+ panic("lost key from datastore")
+ }
+ theKey.Secret = "gophers rule"
+ datastore.Put(c, theKey.Key(c), &theKey.builderKey)
+ return theKey.Secret
+ }
+ panic("cannot load builder key: " + err.Error())
+ }
+
+ return theKey.Secret
+}
diff --git a/llgo/third_party/go.tools/dashboard/app/static/status_alert.gif b/llgo/third_party/go.tools/dashboard/app/static/status_alert.gif
new file mode 100644
index 0000000000000000000000000000000000000000..495d9d2e0c7a7570c22c9a1ad3d673a789636962
Binary files /dev/null and b/llgo/third_party/go.tools/dashboard/app/static/status_alert.gif differ
diff --git a/llgo/third_party/go.tools/dashboard/app/static/status_good.gif b/llgo/third_party/go.tools/dashboard/app/static/status_good.gif
new file mode 100644
index 0000000000000000000000000000000000000000..ef9c5a8f6458b03f0ae08209ad17ed2ec3a563e2
Binary files /dev/null and b/llgo/third_party/go.tools/dashboard/app/static/status_good.gif differ
diff --git a/llgo/third_party/go.tools/dashboard/app/static/style.css b/llgo/third_party/go.tools/dashboard/app/static/style.css
new file mode 100644
index 0000000000000000000000000000000000000000..ddf21296c772efd44c28088a90069c4a1069b79f
--- /dev/null
+++ b/llgo/third_party/go.tools/dashboard/app/static/style.css
@@ -0,0 +1,308 @@
+* { box-sizing: border-box; }
+
+ .dashboards {
+ padding: 0.5em;
+ }
+ .dashboards a {
+ padding: 0.5em;
+ background: #eee;
+ color: blue;
+ }
+
+body {
+ margin: 0;
+ font-family: sans-serif;
+ padding: 0; margin: 0;
+ color: #222;
+}
+
+.container {
+ max-width: 900px;
+ margin: 0 auto;
+}
+
+p, pre, ul, ol { margin: 20px; }
+
+h1, h2, h3, h4 {
+ margin: 20px 0;
+ padding: 0;
+ color: #375EAB;
+ font-weight: bold;
+}
+
+h1 { font-size: 24px; }
+h2 { font-size: 20px; }
+h3 { font-size: 20px; }
+h4 { font-size: 16px; }
+
+h2 { background: #E0EBF5; padding: 2px 5px; }
+h3, h4 { margin: 20px 5px; }
+
+dl, dd { font-size: 14px; }
+dl { margin: 20px; }
+dd { margin: 2px 20px; }
+
+.clear {
+ clear: both;
+}
+
+.button {
+ padding: 10px;
+
+ color: #222;
+ border: 1px solid #375EAB;
+ background: #E0EBF5;
+
+ border-radius: 5px;
+
+ cursor: pointer;
+
+ margin-left: 60px;
+}
+
+/* navigation bar */
+
+#topbar {
+ padding: 10px 10px;
+ background: #E0EBF5;
+}
+
+#topbar a {
+ color: #222;
+}
+#topbar h1 {
+ float: left;
+ margin: 0;
+ padding-top: 5px;
+}
+
+#topbar nav {
+ float: left;
+ margin-left: 20px;
+}
+#topbar nav a {
+ display: inline-block;
+ padding: 10px;
+
+ margin: 0;
+ margin-right: 5px;
+
+ color: white;
+ background: #375EAB;
+
+ text-decoration: none;
+ font-size: 16px;
+
+ border: 1px solid #375EAB;
+ -webkit-border-radius: 5px;
+ -moz-border-radius: 5px;
+ border-radius: 5px;
+}
+
+.page {
+ margin-top: 20px;
+}
+
+/* settings panels */
+aside {
+ margin-top: 5px;
+}
+
+.panel {
+ border: 1px solid #aaa;
+ border-radius: 5px;
+ margin-bottom: 5px;
+}
+
+.panel h1 {
+ font-size: 16px;
+ margin: 0;
+ padding: 2px 8px;
+}
+
+.panel select {
+ padding: 5px;
+ border: 0;
+ width: 100%;
+}
+
+/* results table */
+
+table {
+ margin: 5px;
+ border-collapse: collapse;
+ font-size: 11px;
+}
+
+table td, table th {
+ vertical-align: top;
+ padding: 2px 6px;
+}
+
+table tr:nth-child(2n+1) {
+ background: #F4F4F4;
+}
+
+table thead tr {
+ background: #fff !important;
+}
+
+/* build results */
+
+.build td, .build th, .packages td, .packages th {
+ vertical-align: top;
+ padding: 2px 4px;
+ font-size: 10pt;
+}
+
+.build .hash {
+ font-family: monospace;
+ font-size: 9pt;
+}
+
+.build .result {
+ text-align: center;
+ width: 2em;
+}
+
+.build .col-hash, .build .col-result, .build .col-metric, .build .col-numresults {
+ border-right: 1px solid #ccc;
+}
+
+.build .row-commit {
+ border-top: 2px solid #ccc;
+}
+
+.build .arch {
+ font-size: 83%;
+ font-weight: normal;
+}
+
+.build .time {
+ color: #666;
+}
+
+.build .ok {
+ font-size: 83%;
+}
+
+.build .desc, .build .time, .build .user {
+ white-space: nowrap;
+}
+
+.build .desc {
+ text-align: left;
+ max-width: 470px;
+ overflow: hidden;
+ text-overflow: ellipsis;
+}
+
+.good { text-decoration: none; color: #000000; border: 2px solid #00E700}
+.bad { text-decoration: none; text-shadow: 1px 1px 0 #000000; color: #FFFFFF; background: #E70000;}
+.noise { text-decoration: none; color: #888; }
+.fail { color: #C00; }
+
+/* pagination */
+
+.paginate nav {
+ padding: 0.5em;
+ margin: 10px 0;
+}
+
+.paginate nav a {
+ padding: 0.5em;
+ background: #E0EBF5;
+ color: blue;
+
+ -webkit-border-radius: 5px;
+ -moz-border-radius: 5px;
+ border-radius: 5px;
+}
+
+.paginate nav a.inactive {
+ color: #888;
+ cursor: default;
+ text-decoration: none;
+}
+
+/* diffs */
+
+.diff-meta {
+ font-family: monospace;
+ margin-bottom: 10px;
+}
+
+.diff-container {
+ padding: 10px;
+}
+
+.diff table .metric {
+ font-weight: bold;
+}
+
+.diff {
+ border: 1px solid #aaa;
+ border-radius: 5px;
+ margin-bottom: 5px;
+ margin-right: 10px;
+ float: left;
+}
+
+.diff h1 {
+ font-size: 16px;
+ margin: 0;
+ padding: 2px 8px;
+}
+
+.diff-benchmark {
+ clear: both;
+ padding-top: 5px;
+}
+
+/* positioning elements */
+
+.page {
+ position: relative;
+ width: 100%;
+}
+
+aside {
+ position: absolute;
+ top: 0;
+ left: 0;
+ bottom: 0;
+ width: 200px;
+}
+
+.main-content {
+ position: absolute;
+ top: 0;
+ left: 210px;
+ right: 5px;
+ min-height: 200px;
+ overflow: hidden;
+}
+
+@media only screen and (max-width: 900px) {
+ aside {
+ position: relative;
+ display: block;
+ width: auto;
+ }
+
+ .main-content {
+ position: static;
+ padding: 0;
+ }
+
+ aside .panel {
+ float: left;
+ width: auto;
+ margin-right: 5px;
+ }
+ aside .button {
+ float: left;
+ margin: 0;
+ }
+}
+
diff --git a/llgo/third_party/go.tools/dashboard/builder/bench.go b/llgo/third_party/go.tools/dashboard/builder/bench.go
new file mode 100644
index 0000000000000000000000000000000000000000..fb99aac51e76cd437a9b005f824c11c1c54db5c4
--- /dev/null
+++ b/llgo/third_party/go.tools/dashboard/builder/bench.go
@@ -0,0 +1,256 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+// benchHash benchmarks a single commit.
+func (b *Builder) benchHash(hash string, benchs []string) error {
+ if *verbose {
+ log.Println(b.name, "benchmarking", hash)
+ }
+
+ res := &PerfResult{Hash: hash, Benchmark: "meta-done"}
+
+ // Create place in which to do work.
+ workpath := filepath.Join(*buildroot, b.name+"-"+hash[:12])
+ // Prepare a workpath if we don't have one we can reuse.
+ update := false
+ if b.lastWorkpath != workpath {
+ if err := os.Mkdir(workpath, mkdirPerm); err != nil {
+ return err
+ }
+ buildLog, _, err := b.buildRepoOnHash(workpath, hash, makeCmd)
+ if err != nil {
+ removePath(workpath)
+ // record failure
+ res.Artifacts = append(res.Artifacts, PerfArtifact{"log", buildLog})
+ return b.recordPerfResult(res)
+ }
+ b.lastWorkpath = workpath
+ update = true
+ }
+
+ // Build the benchmark binary.
+ benchBin, buildLog, err := b.buildBenchmark(workpath, update)
+ if err != nil {
+ // record failure
+ res.Artifacts = append(res.Artifacts, PerfArtifact{"log", buildLog})
+ return b.recordPerfResult(res)
+ }
+
+ benchmark, procs, affinity, last := chooseBenchmark(benchBin, benchs)
+ if benchmark != "" {
+ res.Benchmark = fmt.Sprintf("%v-%v", benchmark, procs)
+ res.Metrics, res.Artifacts, res.OK = b.executeBenchmark(workpath, hash, benchBin, benchmark, procs, affinity)
+ if err = b.recordPerfResult(res); err != nil {
+ return fmt.Errorf("recordResult: %s", err)
+ }
+ }
+
+ if last {
+ // All benchmarks have been executed; we don't need the workpath anymore.
+ removePath(b.lastWorkpath)
+ b.lastWorkpath = ""
+ // Notify the app.
+ res = &PerfResult{Hash: hash, Benchmark: "meta-done", OK: true}
+ if err = b.recordPerfResult(res); err != nil {
+ return fmt.Errorf("recordResult: %s", err)
+ }
+ }
+
+ return nil
+}
+
+// buildBenchmark builds the benchmark binary.
+func (b *Builder) buildBenchmark(workpath string, update bool) (benchBin, log string, err error) {
+ goroot := filepath.Join(workpath, "go")
+ gobin := filepath.Join(goroot, "bin", "go") + exeExt
+ gopath := filepath.Join(*buildroot, "gopath")
+ env := append([]string{
+ "GOROOT=" + goroot,
+ "GOPATH=" + gopath},
+ b.envv()...)
+ // First, download without installing.
+ args := []string{"get", "-d"}
+ if update {
+ args = append(args, "-u")
+ }
+ args = append(args, *benchPath)
+ var buildlog bytes.Buffer
+ runOpts := []runOpt{runTimeout(*buildTimeout), runEnv(env), allOutput(&buildlog), runDir(workpath)}
+ err = run(exec.Command(gobin, args...), runOpts...)
+ if err != nil {
+ fmt.Fprintf(&buildlog, "go get -d %s failed: %s", *benchPath, err)
+ return "", buildlog.String(), err
+ }
+ // Then, build into workpath.
+ benchBin = filepath.Join(workpath, "benchbin") + exeExt
+ args = []string{"build", "-o", benchBin, *benchPath}
+ buildlog.Reset()
+ err = run(exec.Command(gobin, args...), runOpts...)
+ if err != nil {
+ fmt.Fprintf(&buildlog, "go build %s failed: %s", *benchPath, err)
+ return "", buildlog.String(), err
+ }
+ return benchBin, "", nil
+}
+
+// chooseBenchmark chooses the next benchmark to run
+// based on the list of available benchmarks, the benchmarks already executed,
+// and the -benchcpu list.
+func chooseBenchmark(benchBin string, doneBenchs []string) (bench string, procs, affinity int, last bool) {
+ var out bytes.Buffer
+ err := run(exec.Command(benchBin), allOutput(&out))
+ if err != nil {
+ log.Printf("Failed to query benchmark list: %v\n%s", err, out)
+ last = true
+ return
+ }
+ outStr := out.String()
+ nlIdx := strings.Index(outStr, "\n")
+ if nlIdx < 0 {
+ log.Printf("Failed to parse benchmark list (no new line): %s", outStr)
+ last = true
+ return
+ }
+ localBenchs := strings.Split(outStr[:nlIdx], ",")
+ benchsMap := make(map[string]bool)
+ for _, b := range doneBenchs {
+ benchsMap[b] = true
+ }
+ cnt := 0
+ // We want to run all benchmarks with GOMAXPROCS=1 first.
+ for i, procs1 := range benchCPU {
+ for _, bench1 := range localBenchs {
+ if benchsMap[fmt.Sprintf("%v-%v", bench1, procs1)] {
+ continue
+ }
+ cnt++
+ if cnt == 1 {
+ bench = bench1
+ procs = procs1
+ if i < len(benchAffinity) {
+ affinity = benchAffinity[i]
+ }
+ }
+ }
+ }
+ last = cnt <= 1
+ return
+}
+
+// executeBenchmark runs a single benchmark and parses its output.
+func (b *Builder) executeBenchmark(workpath, hash, benchBin, bench string, procs, affinity int) (metrics []PerfMetric, artifacts []PerfArtifact, ok bool) {
+ // Benchmark runs are mutually exclusive with other activities.
+ benchMutex.RUnlock()
+ defer benchMutex.RLock()
+ benchMutex.Lock()
+ defer benchMutex.Unlock()
+
+ log.Printf("%v executing benchmark %v-%v on %v", b.name, bench, procs, hash)
+
+ // The benchmark executes 'go build'/'go tool',
+ // so we need a properly set up environment.
+ env := append([]string{
+ "GOROOT=" + filepath.Join(workpath, "go"),
+ "PATH=" + filepath.Join(workpath, "go", "bin") + string(os.PathListSeparator) + os.Getenv("PATH"),
+ "GODEBUG=gctrace=1", // since Go1.2
+ "GOGCTRACE=1", // before Go1.2
+ fmt.Sprintf("GOMAXPROCS=%v", procs)},
+ b.envv()...)
+ args := []string{
+ "-bench", bench,
+ "-benchmem", strconv.Itoa(*benchMem),
+ "-benchtime", benchTime.String(),
+ "-benchnum", strconv.Itoa(*benchNum),
+ "-tmpdir", workpath}
+ if affinity != 0 {
+ args = append(args, "-affinity", strconv.Itoa(affinity))
+ }
+ benchlog := new(bytes.Buffer)
+ err := run(exec.Command(benchBin, args...), runEnv(env), allOutput(benchlog), runDir(workpath))
+ if strip := benchlog.Len() - 512<<10; strip > 0 {
+ // Leave the last 512K, that part contains metrics.
+ benchlog = bytes.NewBuffer(benchlog.Bytes()[strip:])
+ }
+ artifacts = []PerfArtifact{{Type: "log", Body: benchlog.String()}}
+ if err != nil {
+ log.Printf("Failed to execute benchmark '%v': %v", bench, err)
+ ok = false
+ return
+ }
+
+ metrics1, artifacts1, err := parseBenchmarkOutput(benchlog)
+ if err != nil {
+ log.Printf("Failed to parse benchmark output: %v", err)
+ ok = false
+ return
+ }
+ metrics = metrics1
+ artifacts = append(artifacts, artifacts1...)
+ ok = true
+ return
+}
+
+// parseBenchmarkOutput fetches metrics and artifacts from benchmark output.
+func parseBenchmarkOutput(out io.Reader) (metrics []PerfMetric, artifacts []PerfArtifact, err error) {
+ s := bufio.NewScanner(out)
+ metricRe := regexp.MustCompile("^GOPERF-METRIC:([a-z,0-9,-]+)=([0-9]+)$")
+ fileRe := regexp.MustCompile("^GOPERF-FILE:([a-z,0-9,-]+)=(.+)$")
+ for s.Scan() {
+ ln := s.Text()
+ if ss := metricRe.FindStringSubmatch(ln); ss != nil {
+ var v uint64
+ v, err = strconv.ParseUint(ss[2], 10, 64)
+ if err != nil {
+ err = fmt.Errorf("Failed to parse metric '%v=%v': %v", ss[1], ss[2], err)
+ return
+ }
+ metrics = append(metrics, PerfMetric{Type: ss[1], Val: v})
+ } else if ss := fileRe.FindStringSubmatch(ln); ss != nil {
+ var buf []byte
+ buf, err = ioutil.ReadFile(ss[2])
+ if err != nil {
+ err = fmt.Errorf("Failed to read file '%v': %v", ss[2], err)
+ return
+ }
+ artifacts = append(artifacts, PerfArtifact{ss[1], string(buf)})
+ }
+ }
+ return
+}
+
+// needsBenchmarking determines whether the commit needs benchmarking.
+func needsBenchmarking(log *HgLog) bool {
+ // Do not benchmark branch commits, they are usually not interesting
+ // and fall out of the trunk succession.
+ if log.Branch != "" {
+ return false
+ }
+ // Do not benchmark commits that do not touch source files (e.g. CONTRIBUTORS).
+ for _, f := range strings.Split(log.Files, " ") {
+ if (strings.HasPrefix(f, "include") || strings.HasPrefix(f, "src")) &&
+ !strings.HasSuffix(f, "_test.go") && !strings.Contains(f, "testdata") {
+ return true
+ }
+ }
+ return false
+}
diff --git a/llgo/third_party/go.tools/dashboard/builder/doc.go b/llgo/third_party/go.tools/dashboard/builder/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..51928617044d0c68facf7732534a62653f187a3d
--- /dev/null
+++ b/llgo/third_party/go.tools/dashboard/builder/doc.go
@@ -0,0 +1,58 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+
+Go Builder is a continuous build client for the Go project.
+It integrates with the Go Dashboard AppEngine application.
+
+Go Builder is intended to run continuously as a background process.
+
+It periodically pulls updates from the Go Mercurial repository.
+
+When a newer revision is found, Go Builder creates a clone of the repository,
+runs all.bash, and reports build success or failure to the Go Dashboard.
+
+For a release revision (a change description that matches "release.YYYY-MM-DD"),
+Go Builder will create a tar.gz archive of the GOROOT and deliver it to the
+Go Google Code project's downloads section.
+
+Usage:
+
+ gobuilder goos-goarch...
+
+ Several goos-goarch combinations can be provided, and the builder will
+ build them in serial.
+
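+ For example, an illustrative invocation (not taken from the original docs)
+ might be:
+
+ gobuilder linux-amd64 linux-386 windows-amd64
+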
+Optional flags:
+
+ -dashboard="godashboard.appspot.com": Go Dashboard Host
+ The location of the Go Dashboard application to which Go Builder will
+ report its results.
+
+ -release: Build and deliver binary release archive
+
+ -rev=N: Build revision N and exit
+
+ -cmd="./all.bash": Build command (specify absolute or relative to go/src)
+
+ -v: Verbose logging
+
+ -external: External package builder mode (will not report Go build
+ state to dashboard or issue releases)
+
+The key file should be located at $HOME/.gobuildkey or, for a builder-specific
+key, $HOME/.gobuildkey-$BUILDER (e.g., $HOME/.gobuildkey-linux-amd64).
+
+The build key file is a text file of the format:
+
+ godashboard-key
+ googlecode-username
+ googlecode-password
+
+If the Google Code credentials are not provided the archival step
+will be skipped.
+
+*/
+package main
diff --git a/llgo/third_party/go.tools/dashboard/builder/env.go b/llgo/third_party/go.tools/dashboard/builder/env.go
new file mode 100644
index 0000000000000000000000000000000000000000..c4b095b790197b8e4a1df24578435896d693c988
--- /dev/null
+++ b/llgo/third_party/go.tools/dashboard/builder/env.go
@@ -0,0 +1,281 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "regexp"
+ "runtime"
+ "strings"
+
+ "llvm.org/llgo/third_party/go.tools/go/vcs"
+)
+
+// builderEnv represents the environment that a Builder will run tests in.
+type builderEnv interface {
+ // setup sets up the builder environment and returns the directory to run the buildCmd in.
+ setup(repo *Repo, workpath, hash string, envv []string) (string, error)
+}
+
+// goEnv represents the builderEnv for the main Go repo.
+type goEnv struct {
+ goos, goarch string
+}
+
+func (b *Builder) envv() []string {
+ if runtime.GOOS == "windows" {
+ return b.envvWindows()
+ }
+
+ var e []string
+ if *buildTool == "go" {
+ e = []string{
+ "GOOS=" + b.goos,
+ "GOARCH=" + b.goarch,
+ "GOROOT_FINAL=/usr/local/go",
+ }
+ switch b.goos {
+ case "android", "nacl":
+ // Cross compile.
+ default:
+ // If we are building, for example, linux/386 on a linux/amd64 machine we want to
+ // make sure that the whole build is done as if this were compiled on a real
+ // linux/386 machine. In other words, we do not want to do a cross-compilation build.
+ // To do this we set GOHOSTOS and GOHOSTARCH to override the detection in make.bash.
+ //
+ // The exception to this rule is when we are doing nacl/android builds. These are by
+ // definition always cross compilation, and we have support built into cmd/go to be
+ // able to handle this case.
+ e = append(e, "GOHOSTOS="+b.goos, "GOHOSTARCH="+b.goarch)
+ }
+ }
+
+ for _, k := range extraEnv() {
+ if s, ok := getenvOk(k); ok {
+ e = append(e, k+"="+s)
+ }
+ }
+ return e
+}
+
+func (b *Builder) envvWindows() []string {
+ var start map[string]string
+ if *buildTool == "go" {
+ start = map[string]string{
+ "GOOS": b.goos,
+ "GOHOSTOS": b.goos,
+ "GOARCH": b.goarch,
+ "GOHOSTARCH": b.goarch,
+ "GOROOT_FINAL": `c:\go`,
+ "GOBUILDEXIT": "1", // exit all.bat with completion status.
+ }
+ }
+
+ for _, name := range extraEnv() {
+ if s, ok := getenvOk(name); ok {
+ start[name] = s
+ }
+ }
+ if b.goos == "windows" {
+ switch b.goarch {
+ case "amd64":
+ start["PATH"] = `c:\TDM-GCC-64\bin;` + start["PATH"]
+ case "386":
+ start["PATH"] = `c:\TDM-GCC-32\bin;` + start["PATH"]
+ }
+ }
+ skip := map[string]bool{
+ "GOBIN": true,
+ "GOPATH": true,
+ "GOROOT": true,
+ "INCLUDE": true,
+ "LIB": true,
+ }
+ var e []string
+ for name, v := range start {
+ e = append(e, name+"="+v)
+ skip[name] = true
+ }
+ for _, kv := range os.Environ() {
+ s := strings.SplitN(kv, "=", 2)
+ name := strings.ToUpper(s[0])
+ switch {
+ case name == "":
+ // variables like "=C:=C:\"; just copy them.
+ e = append(e, kv)
+ case !skip[name]:
+ e = append(e, kv)
+ skip[name] = true
+ }
+ }
+ return e
+}
+
+// setup for a goEnv clones the main go repo to workpath/go at the provided hash
+// and returns the path workpath/go/src, the location of all go build scripts.
+func (env *goEnv) setup(repo *Repo, workpath, hash string, envv []string) (string, error) {
+ goworkpath := filepath.Join(workpath, "go")
+ if err := repo.Export(goworkpath, hash); err != nil {
+ return "", fmt.Errorf("error exporting repository: %s", err)
+ }
+ // Write out VERSION file if it does not already exist.
+ vFile := filepath.Join(goworkpath, "VERSION")
+ if _, err := os.Stat(vFile); os.IsNotExist(err) {
+ if err := ioutil.WriteFile(vFile, []byte(hash), 0644); err != nil {
+ return "", fmt.Errorf("error writing VERSION file: %s", err)
+ }
+ }
+ return filepath.Join(goworkpath, "src"), nil
+}
+
+// gccgoEnv represents the builderEnv for the gccgo compiler.
+type gccgoEnv struct{}
+
+// setup for a gccgoEnv clones the gofrontend repo to workpath/go at the hash
+// and clones the latest GCC branch to repo.Path/gcc. The gccgo sources are
+// replaced with the updated sources in the gofrontend repo, and gcc is
+// configured and built in workpath/gcc-objdir. The path to
+// workpath/gcc-objdir is returned.
+func (env *gccgoEnv) setup(repo *Repo, workpath, hash string, envv []string) (string, error) {
+ gccpath := filepath.Join(repo.Path, "gcc")
+
+ // get a handle to Git vcs.Cmd for pulling down GCC from the mirror.
+ git := vcs.ByCmd("git")
+
+ // only pull down gcc if we don't have a local copy.
+ if _, err := os.Stat(gccpath); err != nil {
+ if err := timeout(*cmdTimeout, func() error {
+ // pull down a working copy of GCC.
+ return git.Create(gccpath, *gccPath)
+ }); err != nil {
+ return "", err
+ }
+ }
+
+ if err := git.Download(gccpath); err != nil {
+ return "", err
+ }
+
+ // get the modified files for this commit.
+
+ var buf bytes.Buffer
+ if err := run(exec.Command("hg", "status", "--no-status", "--change", hash),
+ allOutput(&buf), runDir(repo.Path), runEnv(envv)); err != nil {
+ return "", fmt.Errorf("Failed to find the modified files for %s: %s", hash, err)
+ }
+ modifiedFiles := strings.Split(buf.String(), "\n")
+ var isMirrored bool
+ for _, f := range modifiedFiles {
+ if strings.HasPrefix(f, "go/") || strings.HasPrefix(f, "libgo/") {
+ isMirrored = true
+ break
+ }
+ }
+
+ // use git log to find the corresponding commit to sync to in the gcc mirror.
+ // If the files modified in the gofrontend are mirrored to gcc, we expect a
+ // commit with a similar description in the gcc mirror. If the files modified are
+ // not mirrored, e.g. in support/, we can sync to the most recent gcc commit that
+ // occurred before those files were modified to verify gccgo's status at that point.
+ logCmd := []string{
+ "log",
+ "-1",
+ "--format=%H",
+ }
+ var errMsg string
+ if isMirrored {
+ commitDesc, err := repo.Master.VCS.LogAtRev(repo.Path, hash, "{desc|firstline|escape}")
+ if err != nil {
+ return "", err
+ }
+
+ quotedDesc := regexp.QuoteMeta(string(commitDesc))
+ logCmd = append(logCmd, "--grep", quotedDesc, "--regexp-ignore-case", "--extended-regexp")
+ errMsg = fmt.Sprintf("Failed to find a commit with a similar description to '%s'", string(commitDesc))
+ } else {
+ commitDate, err := repo.Master.VCS.LogAtRev(repo.Path, hash, "{date|rfc3339date}")
+ if err != nil {
+ return "", err
+ }
+
+ logCmd = append(logCmd, "--before", string(commitDate))
+ errMsg = fmt.Sprintf("Failed to find a commit before '%s'", string(commitDate))
+ }
+
+ buf.Reset()
+ if err := run(exec.Command("git", logCmd...), runEnv(envv), allOutput(&buf), runDir(gccpath)); err != nil {
+ return "", fmt.Errorf("%s: %s", errMsg, err)
+ }
+ gccRev := buf.String()
+ if gccRev == "" {
+ return "", fmt.Errorf(errMsg)
+ }
+
+ // checkout gccRev
+ // TODO(cmang): Fix this to work in parallel mode.
+ if err := run(exec.Command("git", "reset", "--hard", strings.TrimSpace(gccRev)), runEnv(envv), runDir(gccpath)); err != nil {
+ return "", fmt.Errorf("Failed to checkout commit at revision %s: %s", gccRev, err)
+ }
+
+ // make objdir to work in
+ gccobjdir := filepath.Join(workpath, "gcc-objdir")
+ if err := os.Mkdir(gccobjdir, mkdirPerm); err != nil {
+ return "", err
+ }
+
+ // configure GCC with substituted gofrontend and libgo
+ if err := run(exec.Command(filepath.Join(gccpath, "configure"),
+ "--enable-languages=c,c++,go",
+ "--disable-bootstrap",
+ "--disable-multilib",
+ ), runEnv(envv), runDir(gccobjdir)); err != nil {
+ return "", fmt.Errorf("Failed to configure GCC: %v", err)
+ }
+
+ // build gcc
+ if err := run(exec.Command("make"), runTimeout(*buildTimeout), runEnv(envv), runDir(gccobjdir)); err != nil {
+ return "", fmt.Errorf("Failed to build GCC: %s", err)
+ }
+
+ return gccobjdir, nil
+}
+
+func getenvOk(k string) (v string, ok bool) {
+ v = os.Getenv(k)
+ if v != "" {
+ return v, true
+ }
+ keq := k + "="
+ for _, kv := range os.Environ() {
+ if kv == keq {
+ return "", true
+ }
+ }
+ return "", false
+}
+
+// extraEnv returns environment variables that need to be copied from
+// the gobuilder's environment to the envv of its subprocesses.
+func extraEnv() []string {
+ extra := []string{
+ "GOARM",
+ "GO386",
+ "CGO_ENABLED",
+ "CC",
+ "CC_FOR_TARGET",
+ "PATH",
+ "TMPDIR",
+ "USER",
+ }
+ if runtime.GOOS == "plan9" {
+ extra = append(extra, "objtype", "cputype", "path")
+ }
+ return extra
+}
diff --git a/llgo/third_party/go.tools/dashboard/builder/exec.go b/llgo/third_party/go.tools/dashboard/builder/exec.go
new file mode 100644
index 0000000000000000000000000000000000000000..c40301f7adfd4a0dec6ba49f3f0008ddcc146a04
--- /dev/null
+++ b/llgo/third_party/go.tools/dashboard/builder/exec.go
@@ -0,0 +1,98 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "fmt"
+ "io"
+ "log"
+ "os/exec"
+ "time"
+)
+
+// run runs a command with optional arguments.
+func run(cmd *exec.Cmd, opts ...runOpt) error {
+ a := runArgs{cmd, *cmdTimeout}
+ for _, opt := range opts {
+ opt.modArgs(&a)
+ }
+ if *verbose {
+ log.Printf("running %v", a.cmd.Args)
+ }
+ if err := cmd.Start(); err != nil {
+ return err
+ }
+ err := timeout(a.timeout, cmd.Wait)
+ if _, ok := err.(timeoutError); ok {
+ cmd.Process.Kill()
+ }
+ return err
+}
+
+// Zero or more runOpts can be passed to run to modify the command
+// before it's run.
+type runOpt interface {
+ modArgs(*runArgs)
+}
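+
+// As an illustrative sketch only (the command, directory, and buffer here are
+// hypothetical, not taken from the rest of this builder), a caller can combine
+// several runOpts in a single call:
+//
+//  var out bytes.Buffer
+//  cmd := exec.Command("go", "version")
+//  err := run(cmd, runTimeout(*cmdTimeout), runDir("/tmp"), allOutput(&out))
+//  // err is non-nil on failure or timeout; out holds combined stdout/stderr.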
+
+// allOutput sends both stdout and stderr to w.
+func allOutput(w io.Writer) optFunc {
+ return func(a *runArgs) {
+ a.cmd.Stdout = w
+ a.cmd.Stderr = w
+ }
+}
+
+func runTimeout(timeout time.Duration) optFunc {
+ return func(a *runArgs) {
+ a.timeout = timeout
+ }
+}
+
+func runDir(dir string) optFunc {
+ return func(a *runArgs) {
+ a.cmd.Dir = dir
+ }
+}
+
+func runEnv(env []string) optFunc {
+ return func(a *runArgs) {
+ a.cmd.Env = env
+ }
+}
+
+// timeout runs f and returns its error value, or a timeout error if the
+// function does not complete before the provided duration.
+func timeout(d time.Duration, f func() error) error {
+ errc := make(chan error, 1)
+ go func() {
+ errc <- f()
+ }()
+ t := time.NewTimer(d)
+ defer t.Stop()
+ select {
+ case <-t.C:
+ return timeoutError(d)
+ case err := <-errc:
+ return err
+ }
+}
+
+type timeoutError time.Duration
+
+func (e timeoutError) Error() string {
+ return fmt.Sprintf("timed out after %v", time.Duration(e))
+}
+
+// optFunc implements runOpt with a function, like http.HandlerFunc.
+type optFunc func(*runArgs)
+
+func (f optFunc) modArgs(a *runArgs) { f(a) }
+
+// internal detail to exec.go:
+type runArgs struct {
+ cmd *exec.Cmd
+ timeout time.Duration
+}
diff --git a/llgo/third_party/go.tools/dashboard/builder/filemutex_flock.go b/llgo/third_party/go.tools/dashboard/builder/filemutex_flock.go
new file mode 100644
index 0000000000000000000000000000000000000000..68851b8df006eb8204ad532a27251b62a1bc323f
--- /dev/null
+++ b/llgo/third_party/go.tools/dashboard/builder/filemutex_flock.go
@@ -0,0 +1,66 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd
+
+package main
+
+import (
+ "sync"
+ "syscall"
+)
+
+// FileMutex is similar to sync.RWMutex, but also synchronizes across processes.
+// This implementation is based on the flock syscall.
+type FileMutex struct {
+ mu sync.RWMutex
+ fd int
+}
+
+func MakeFileMutex(filename string) *FileMutex {
+ if filename == "" {
+ return &FileMutex{fd: -1}
+ }
+ fd, err := syscall.Open(filename, syscall.O_CREAT|syscall.O_RDONLY, mkdirPerm)
+ if err != nil {
+ panic(err)
+ }
+ return &FileMutex{fd: fd}
+}
+
+func (m *FileMutex) Lock() {
+ m.mu.Lock()
+ if m.fd != -1 {
+ if err := syscall.Flock(m.fd, syscall.LOCK_EX); err != nil {
+ panic(err)
+ }
+ }
+}
+
+func (m *FileMutex) Unlock() {
+ if m.fd != -1 {
+ if err := syscall.Flock(m.fd, syscall.LOCK_UN); err != nil {
+ panic(err)
+ }
+ }
+ m.mu.Unlock()
+}
+
+func (m *FileMutex) RLock() {
+ m.mu.RLock()
+ if m.fd != -1 {
+ if err := syscall.Flock(m.fd, syscall.LOCK_SH); err != nil {
+ panic(err)
+ }
+ }
+}
+
+func (m *FileMutex) RUnlock() {
+ if m.fd != -1 {
+ if err := syscall.Flock(m.fd, syscall.LOCK_UN); err != nil {
+ panic(err)
+ }
+ }
+ m.mu.RUnlock()
+}
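+
+// A minimal usage sketch (the lock file path is hypothetical; this mirrors how
+// benchMutex is used in main.go): benchmarking takes the exclusive lock while
+// everything else holds the shared lock.
+//
+//  m := MakeFileMutex("/tmp/gobuild.lock")
+//  m.RLock()   // building and polling may proceed concurrently
+//  m.RUnlock()
+//  m.Lock()    // benchmarking excludes all other activity
+//  m.Unlock()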
diff --git a/llgo/third_party/go.tools/dashboard/builder/filemutex_local.go b/llgo/third_party/go.tools/dashboard/builder/filemutex_local.go
new file mode 100644
index 0000000000000000000000000000000000000000..68cfb62f3ba178290d88d576c4116f194344acf8
--- /dev/null
+++ b/llgo/third_party/go.tools/dashboard/builder/filemutex_local.go
@@ -0,0 +1,27 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build nacl plan9 solaris
+
+package main
+
+import (
+ "log"
+ "sync"
+)
+
+// FileMutex is similar to sync.RWMutex, but also synchronizes across processes.
+// This implementation is a fallback that does not actually provide inter-process synchronization.
+type FileMutex struct {
+ sync.RWMutex
+}
+
+func MakeFileMutex(filename string) *FileMutex {
+ return &FileMutex{}
+}
+
+func init() {
+ log.Printf("WARNING: using fake file mutex." +
+ " Don't run more than one of these at once!!!")
+}
diff --git a/llgo/third_party/go.tools/dashboard/builder/filemutex_windows.go b/llgo/third_party/go.tools/dashboard/builder/filemutex_windows.go
new file mode 100644
index 0000000000000000000000000000000000000000..1f058b2380db70add8ba96591d49dc7813e96ab2
--- /dev/null
+++ b/llgo/third_party/go.tools/dashboard/builder/filemutex_windows.go
@@ -0,0 +1,105 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "sync"
+ "syscall"
+ "unsafe"
+)
+
+var (
+ modkernel32 = syscall.NewLazyDLL("kernel32.dll")
+ procLockFileEx = modkernel32.NewProc("LockFileEx")
+ procUnlockFileEx = modkernel32.NewProc("UnlockFileEx")
+)
+
+const (
+ INVALID_FILE_HANDLE = ^syscall.Handle(0)
+ LOCKFILE_EXCLUSIVE_LOCK = 2
+)
+
+func lockFileEx(h syscall.Handle, flags, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) {
+ r1, _, e1 := syscall.Syscall6(procLockFileEx.Addr(), 6, uintptr(h), uintptr(flags), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)))
+ if r1 == 0 {
+ if e1 != 0 {
+ err = error(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func unlockFileEx(h syscall.Handle, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) {
+ r1, _, e1 := syscall.Syscall6(procUnlockFileEx.Addr(), 5, uintptr(h), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)), 0)
+ if r1 == 0 {
+ if e1 != 0 {
+ err = error(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+// FileMutex is similar to sync.RWMutex, but also synchronizes across processes.
+// This implementation uses the Windows LockFileEx and UnlockFileEx APIs.
+type FileMutex struct {
+ mu sync.RWMutex
+ fd syscall.Handle
+}
+
+func MakeFileMutex(filename string) *FileMutex {
+ if filename == "" {
+ return &FileMutex{fd: INVALID_FILE_HANDLE}
+ }
+ fd, err := syscall.CreateFile(&(syscall.StringToUTF16(filename)[0]), syscall.GENERIC_READ|syscall.GENERIC_WRITE,
+ syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE, nil, syscall.OPEN_ALWAYS, syscall.FILE_ATTRIBUTE_NORMAL, 0)
+ if err != nil {
+ panic(err)
+ }
+ return &FileMutex{fd: fd}
+}
+
+func (m *FileMutex) Lock() {
+ m.mu.Lock()
+ if m.fd != INVALID_FILE_HANDLE {
+ var ol syscall.Overlapped
+ if err := lockFileEx(m.fd, LOCKFILE_EXCLUSIVE_LOCK, 0, 1, 0, &ol); err != nil {
+ panic(err)
+ }
+ }
+}
+
+func (m *FileMutex) Unlock() {
+ if m.fd != INVALID_FILE_HANDLE {
+ var ol syscall.Overlapped
+ if err := unlockFileEx(m.fd, 0, 1, 0, &ol); err != nil {
+ panic(err)
+ }
+ }
+ m.mu.Unlock()
+}
+
+func (m *FileMutex) RLock() {
+ m.mu.RLock()
+ if m.fd != INVALID_FILE_HANDLE {
+ var ol syscall.Overlapped
+ if err := lockFileEx(m.fd, 0, 0, 1, 0, &ol); err != nil {
+ panic(err)
+ }
+ }
+}
+
+func (m *FileMutex) RUnlock() {
+ if m.fd != INVALID_FILE_HANDLE {
+ var ol syscall.Overlapped
+ if err := unlockFileEx(m.fd, 0, 1, 0, &ol); err != nil {
+ panic(err)
+ }
+ }
+ m.mu.RUnlock()
+}
diff --git a/llgo/third_party/go.tools/dashboard/builder/http.go b/llgo/third_party/go.tools/dashboard/builder/http.go
new file mode 100644
index 0000000000000000000000000000000000000000..3fbad3a87102063d2f4340fd9d801f26fbe08624
--- /dev/null
+++ b/llgo/third_party/go.tools/dashboard/builder/http.go
@@ -0,0 +1,219 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "log"
+ "net/http"
+ "net/url"
+ "time"
+)
+
+type obj map[string]interface{}
+
+// dash runs the given method and command on the dashboard.
+// If args is non-nil it is encoded as the URL query string.
+// If req is non-nil it is JSON-encoded and passed as the body of the HTTP POST.
+// If resp is non-nil the server's response is decoded into the value pointed
+// to by resp (resp must be a pointer).
+func dash(meth, cmd string, args url.Values, req, resp interface{}) error {
+ var r *http.Response
+ var err error
+ if *verbose {
+ log.Println("dash <-", meth, cmd, args, req)
+ }
+ cmd = *dashboard + "/" + cmd
+ if len(args) > 0 {
+ cmd += "?" + args.Encode()
+ }
+ switch meth {
+ case "GET":
+ if req != nil {
+ log.Panicf("%s to %s with req", meth, cmd)
+ }
+ r, err = http.Get(cmd)
+ case "POST":
+ var body io.Reader
+ if req != nil {
+ b, err := json.Marshal(req)
+ if err != nil {
+ return err
+ }
+ body = bytes.NewBuffer(b)
+ }
+ r, err = http.Post(cmd, "text/json", body)
+ default:
+ log.Panicf("%s: invalid method %q", cmd, meth)
+ panic("invalid method: " + meth)
+ }
+ if err != nil {
+ return err
+ }
+ defer r.Body.Close()
+ if r.StatusCode != http.StatusOK {
+ return fmt.Errorf("bad http response: %v", r.Status)
+ }
+ body := new(bytes.Buffer)
+ if _, err := body.ReadFrom(r.Body); err != nil {
+ return err
+ }
+
+ // Read JSON-encoded Response into provided resp
+ // and return an error if present.
+ var result = struct {
+ Response interface{}
+ Error string
+ }{
+ // Put the provided resp in here as it can be a pointer to
+ // some value we should unmarshal into.
+ Response: resp,
+ }
+ if err = json.Unmarshal(body.Bytes(), &result); err != nil {
+ log.Printf("json unmarshal %#q: %s\n", body.Bytes(), err)
+ return err
+ }
+ if *verbose {
+ log.Println("dash ->", result)
+ }
+ if result.Error != "" {
+ return errors.New(result.Error)
+ }
+
+ return nil
+}
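+
+// For illustration only (the builder name and the response shape are
+// simplified, hypothetical values): a typical GET through dash looks like
+//
+//  var resp struct{ Kind string }
+//  args := url.Values{"builder": {"linux-amd64"}}
+//  err := dash("GET", "todo", args, nil, &resp)
+//
+// as (*Builder).todo below does with a fuller response struct.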
+
+// todo returns the next hash to build or benchmark.
+func (b *Builder) todo(kinds []string, pkg, goHash string) (kind, rev string, benchs []string, err error) {
+ args := url.Values{
+ "builder": {b.name},
+ "packagePath": {pkg},
+ "goHash": {goHash},
+ }
+ for _, k := range kinds {
+ args.Add("kind", k)
+ }
+ var resp *struct {
+ Kind string
+ Data struct {
+ Hash string
+ PerfResults []string
+ }
+ }
+ if err = dash("GET", "todo", args, nil, &resp); err != nil {
+ return
+ }
+ if resp == nil {
+ return
+ }
+ if *verbose {
+ fmt.Printf("dash resp: %+v\n", *resp)
+ }
+ for _, k := range kinds {
+ if k == resp.Kind {
+ return resp.Kind, resp.Data.Hash, resp.Data.PerfResults, nil
+ }
+ }
+ err = fmt.Errorf("expecting Kinds %q, got %q", kinds, resp.Kind)
+ return
+}
+
+// recordResult sends build results to the dashboard
+func (b *Builder) recordResult(ok bool, pkg, hash, goHash, buildLog string, runTime time.Duration) error {
+ if !*report {
+ return nil
+ }
+ req := obj{
+ "Builder": b.name,
+ "PackagePath": pkg,
+ "Hash": hash,
+ "GoHash": goHash,
+ "OK": ok,
+ "Log": buildLog,
+ "RunTime": runTime,
+ }
+ args := url.Values{"key": {b.key}, "builder": {b.name}}
+ return dash("POST", "result", args, req, nil)
+}
+
+// Result of running a single benchmark on a single commit.
+type PerfResult struct {
+ Builder string
+ Benchmark string
+ Hash string
+ OK bool
+ Metrics []PerfMetric
+ Artifacts []PerfArtifact
+}
+
+type PerfMetric struct {
+ Type string
+ Val uint64
+}
+
+type PerfArtifact struct {
+ Type string
+ Body string
+}
+
+// recordPerfResult sends benchmarking results to the dashboard
+func (b *Builder) recordPerfResult(req *PerfResult) error {
+ if !*report {
+ return nil
+ }
+ req.Builder = b.name
+ args := url.Values{"key": {b.key}, "builder": {b.name}}
+ return dash("POST", "perf-result", args, req, nil)
+}
+
+func postCommit(key, pkg string, l *HgLog) error {
+ if !*report {
+ return nil
+ }
+ t, err := time.Parse(time.RFC3339, l.Date)
+ if err != nil {
+ return fmt.Errorf("parsing %q: %v", l.Date, t)
+ }
+ return dash("POST", "commit", url.Values{"key": {key}}, obj{
+ "PackagePath": pkg,
+ "Hash": l.Hash,
+ "ParentHash": l.Parent,
+ "Time": t.Format(time.RFC3339),
+ "User": l.Author,
+ "Desc": l.Desc,
+ "NeedsBenchmarking": l.bench,
+ }, nil)
+}
+
+func dashboardCommit(pkg, hash string) bool {
+ err := dash("GET", "commit", url.Values{
+ "packagePath": {pkg},
+ "hash": {hash},
+ }, nil, nil)
+ return err == nil
+}
+
+func dashboardPackages(kind string) []string {
+ args := url.Values{"kind": []string{kind}}
+ var resp []struct {
+ Path string
+ }
+ if err := dash("GET", "packages", args, nil, &resp); err != nil {
+ log.Println("dashboardPackages:", err)
+ return nil
+ }
+ if *verbose {
+ fmt.Printf("dash resp: %+v\n", resp)
+ }
+ var pkgs []string
+ for _, r := range resp {
+ pkgs = append(pkgs, r.Path)
+ }
+ return pkgs
+}
diff --git a/llgo/third_party/go.tools/dashboard/builder/main.go b/llgo/third_party/go.tools/dashboard/builder/main.go
new file mode 100644
index 0000000000000000000000000000000000000000..6635fc5f1584117e91593ffbebf5efd7b6a5eed1
--- /dev/null
+++ b/llgo/third_party/go.tools/dashboard/builder/main.go
@@ -0,0 +1,831 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "bytes"
+ "flag"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "net"
+ "net/http"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "regexp"
+ "runtime"
+ "strconv"
+ "strings"
+ "time"
+
+ "llvm.org/llgo/third_party/go.tools/go/vcs"
+)
+
+const (
+ codeProject = "go"
+ codePyScript = "misc/dashboard/googlecode_upload.py"
+ gofrontendImportPath = "code.google.com/p/gofrontend"
+ mkdirPerm = 0750
+ waitInterval = 30 * time.Second // time to wait before checking for new revs
+ pkgBuildInterval = 24 * time.Hour // rebuild packages every 24 hours
+)
+
+type Builder struct {
+ goroot *Repo
+ name string
+ goos, goarch string
+ key string
+ env builderEnv
+ // Last benchmarking workpath. We reuse it if we do successive benchmarks on the same commit.
+ lastWorkpath string
+}
+
+var (
+ doBuild = flag.Bool("build", true, "Build and test packages")
+ doBench = flag.Bool("bench", false, "Run benchmarks")
+ buildroot = flag.String("buildroot", defaultBuildRoot(), "Directory under which to build")
+ dashboard = flag.String("dashboard", "https://build.golang.org", "Dashboard app base path")
+ buildRelease = flag.Bool("release", false, "Build and upload binary release archives")
+ buildRevision = flag.String("rev", "", "Build specified revision and exit")
+ buildCmd = flag.String("cmd", filepath.Join(".", allCmd), "Build command (specify relative to go/src/)")
+ buildTool = flag.String("tool", "go", "Tool to build.")
+ gcPath = flag.String("gcpath", "code.google.com/p/go", "Path to download gc from")
+ gccPath = flag.String("gccpath", "https://github.com/mirrors/gcc.git", "Path to download gcc from")
+ benchPath = flag.String("benchpath", "golang.org/x/benchmarks/bench", "Path to download benchmarks from")
+ failAll = flag.Bool("fail", false, "fail all builds")
+ parallel = flag.Bool("parallel", false, "Build multiple targets in parallel")
+ buildTimeout = flag.Duration("buildTimeout", 60*time.Minute, "Maximum time to wait for builds and tests")
+ cmdTimeout = flag.Duration("cmdTimeout", 10*time.Minute, "Maximum time to wait for an external command")
+ commitInterval = flag.Duration("commitInterval", 1*time.Minute, "Time to wait between polling for new commits (0 disables commit poller)")
+ commitWatch = flag.Bool("commitWatch", false, "run the commit watch loop only (do no builds)")
+ benchNum = flag.Int("benchnum", 5, "Run each benchmark that many times")
+ benchTime = flag.Duration("benchtime", 5*time.Second, "Benchmarking time for a single benchmark run")
+ benchMem = flag.Int("benchmem", 64, "Approx RSS value to aim at in benchmarks, in MB")
+ fileLock = flag.String("filelock", "", "File to lock around benchmarking (synchronizes several builders)")
+ verbose = flag.Bool("v", false, "verbose")
+ report = flag.Bool("report", true, "whether to report results to the dashboard")
+)
+
+var (
+ binaryTagRe = regexp.MustCompile(`^(release\.r|weekly\.)[0-9\-.]+`)
+ releaseRe = regexp.MustCompile(`^release\.r[0-9\-.]+`)
+ allCmd = "all" + suffix
+ makeCmd = "make" + suffix
+ raceCmd = "race" + suffix
+ cleanCmd = "clean" + suffix
+ suffix = defaultSuffix()
+ exeExt = defaultExeExt()
+
+ benchCPU = CpuList([]int{1})
+ benchAffinity = CpuList([]int{})
+ benchMutex *FileMutex // Isolates benchmarks from other activities
+)
+
+// CpuList is used as flag.Value for -benchcpu flag.
+type CpuList []int
+
+func (cl *CpuList) String() string {
+ str := ""
+ for _, cpu := range *cl {
+ if str == "" {
+ str = strconv.Itoa(cpu)
+ } else {
+ str += fmt.Sprintf(",%v", cpu)
+ }
+ }
+ return str
+}
+
+func (cl *CpuList) Set(str string) error {
+ *cl = []int{}
+ for _, val := range strings.Split(str, ",") {
+ val = strings.TrimSpace(val)
+ if val == "" {
+ continue
+ }
+ cpu, err := strconv.Atoi(val)
+ if err != nil || cpu <= 0 {
+ return fmt.Errorf("%v is a bad value for GOMAXPROCS", val)
+ }
+ *cl = append(*cl, cpu)
+ }
+ if len(*cl) == 0 {
+ *cl = append(*cl, 1)
+ }
+ return nil
+}
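+
+// Illustrative only (builder name and values are hypothetical): with the flag
+// definitions in main below, a benchmarking builder could be started as
+//
+//  gobuilder -bench -benchcpu=1,2,4 linux-amd64
+//
+// which runs each benchmark at GOMAXPROCS 1, 2 and 4.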
+
+func main() {
+ flag.Var(&benchCPU, "benchcpu", "Comma-delimited list of GOMAXPROCS values for benchmarking")
+ flag.Var(&benchAffinity, "benchaffinity", "Comma-delimited list of affinity values for benchmarking")
+ flag.Usage = func() {
+ fmt.Fprintf(os.Stderr, "usage: %s goos-goarch...\n", os.Args[0])
+ flag.PrintDefaults()
+ os.Exit(2)
+ }
+ flag.Parse()
+ if len(flag.Args()) == 0 && !*commitWatch {
+ flag.Usage()
+ }
+
+ vcs.ShowCmd = *verbose
+ vcs.Verbose = *verbose
+
+ benchMutex = MakeFileMutex(*fileLock)
+
+ rr, err := repoForTool()
+ if err != nil {
+ log.Fatal("Error finding repository:", err)
+ }
+ rootPath := filepath.Join(*buildroot, "goroot")
+ goroot := &Repo{
+ Path: rootPath,
+ Master: rr,
+ }
+
+ // set up work environment, use existing environment if possible
+ if goroot.Exists() || *failAll {
+ log.Print("Found old workspace, will use it")
+ } else {
+ if err := os.RemoveAll(*buildroot); err != nil {
+ log.Fatalf("Error removing build root (%s): %s", *buildroot, err)
+ }
+ if err := os.Mkdir(*buildroot, mkdirPerm); err != nil {
+ log.Fatalf("Error making build root (%s): %s", *buildroot, err)
+ }
+ var err error
+ goroot, err = RemoteRepo(goroot.Master.Root, rootPath)
+ if err != nil {
+ log.Fatalf("Error creating repository with url (%s): %s", goroot.Master.Root, err)
+ }
+
+ goroot, err = goroot.Clone(goroot.Path, "tip")
+ if err != nil {
+ log.Fatal("Error cloning repository:", err)
+ }
+ }
+
+ // set up builders
+ builders := make([]*Builder, len(flag.Args()))
+ for i, name := range flag.Args() {
+ b, err := NewBuilder(goroot, name)
+ if err != nil {
+ log.Fatal(err)
+ }
+ builders[i] = b
+ }
+
+ if *failAll {
+ failMode(builders)
+ return
+ }
+
+ // if specified, build revision and return
+ if *buildRevision != "" {
+ hash, err := goroot.FullHash(*buildRevision)
+ if err != nil {
+ log.Fatal("Error finding revision: ", err)
+ }
+ var exitErr error
+ for _, b := range builders {
+ if err := b.buildHash(hash); err != nil {
+ log.Println(err)
+ exitErr = err
+ }
+ }
+ if exitErr != nil && !*report {
+ // This mode (-report=false) is used for
+ // testing Docker images, making sure the
+ // environment is correctly configured. For
+ // testing, we want a non-zero exit status, as
+ // returned by log.Fatal:
+ log.Fatal("Build error.")
+ }
+ return
+ }
+
+ if !*doBuild && !*doBench {
+ fmt.Fprintf(os.Stderr, "Nothing to do, exiting (specify either -build or -bench or both)\n")
+ os.Exit(2)
+ }
+
+ // Start commit watcher.
+ if *commitWatch {
+ commitWatcher(goroot)
+ return
+ }
+
+ // go continuous build mode
+ // check for new commits and build them
+ benchMutex.RLock()
+ for {
+ built := false
+ t := time.Now()
+ if *parallel {
+ done := make(chan bool)
+ for _, b := range builders {
+ go func(b *Builder) {
+ done <- b.buildOrBench()
+ }(b)
+ }
+ for _ = range builders {
+ built = <-done || built
+ }
+ } else {
+ for _, b := range builders {
+ built = b.buildOrBench() || built
+ }
+ }
+ // sleep if there was nothing to build
+ benchMutex.RUnlock()
+ if !built {
+ time.Sleep(waitInterval)
+ }
+ benchMutex.RLock()
+ // sleep if we're looping too fast.
+ dt := time.Now().Sub(t)
+ if dt < waitInterval {
+ time.Sleep(waitInterval - dt)
+ }
+ }
+}
+
+// go continuous fail mode
+// check for new commits and FAIL them
+func failMode(builders []*Builder) {
+ for {
+ built := false
+ for _, b := range builders {
+ built = b.failBuild() || built
+ }
+ // stop if there was nothing to fail
+ if !built {
+ break
+ }
+ }
+}
+
+func NewBuilder(goroot *Repo, name string) (*Builder, error) {
+ b := &Builder{
+ goroot: goroot,
+ name: name,
+ }
+
+ // get builderEnv for this tool
+ var err error
+ if b.env, err = b.builderEnv(name); err != nil {
+ return nil, err
+ }
+ if *report {
+ err = b.setKey()
+ }
+ return b, err
+}
+
+func (b *Builder) setKey() error {
+ // read keys from keyfile
+ fn := ""
+ switch runtime.GOOS {
+ case "plan9":
+ fn = os.Getenv("home")
+ case "windows":
+ fn = os.Getenv("HOMEDRIVE") + os.Getenv("HOMEPATH")
+ default:
+ fn = os.Getenv("HOME")
+ }
+ fn = filepath.Join(fn, ".gobuildkey")
+ if s := fn + "-" + b.name; isFile(s) { // builder-specific file
+ fn = s
+ }
+ c, err := ioutil.ReadFile(fn)
+ if err != nil {
+ // If the on-disk file doesn't exist, also try the
+ // Google Compute Engine metadata.
+ if v := gceProjectMetadata("buildkey-" + b.name); v != "" {
+ b.key = v
+ return nil
+ }
+ return fmt.Errorf("readKeys %s (%s): %s", b.name, fn, err)
+ }
+ b.key = string(bytes.TrimSpace(bytes.SplitN(c, []byte("\n"), 2)[0]))
+ return nil
+}
+
+func gceProjectMetadata(attr string) string {
+ client := &http.Client{
+ Transport: &http.Transport{
+ Dial: (&net.Dialer{
+ Timeout: 750 * time.Millisecond,
+ KeepAlive: 30 * time.Second,
+ }).Dial,
+ ResponseHeaderTimeout: 750 * time.Millisecond,
+ },
+ }
+ req, _ := http.NewRequest("GET", "http://metadata.google.internal/computeMetadata/v1/project/attributes/"+attr, nil)
+ req.Header.Set("Metadata-Flavor", "Google")
+ res, err := client.Do(req)
+ if err != nil {
+ return ""
+ }
+ defer res.Body.Close()
+ if res.StatusCode != 200 {
+ return ""
+ }
+ slurp, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ return ""
+ }
+ return string(bytes.TrimSpace(slurp))
+}
+
+// builderEnv returns the builderEnv for this buildTool.
+func (b *Builder) builderEnv(name string) (builderEnv, error) {
+ // get goos/goarch from builder string
+ s := strings.SplitN(b.name, "-", 3)
+ if len(s) < 2 {
+ return nil, fmt.Errorf("unsupported builder form: %s", name)
+ }
+ b.goos = s[0]
+ b.goarch = s[1]
+
+ switch *buildTool {
+ case "go":
+ return &goEnv{
+ goos: s[0],
+ goarch: s[1],
+ }, nil
+ case "gccgo":
+ return &gccgoEnv{}, nil
+ default:
+ return nil, fmt.Errorf("unsupported build tool: %s", *buildTool)
+ }
+}
+
+// buildCmd returns the build command to invoke.
+// Builders which contain the string '-race' in their
+// name will override *buildCmd and return raceCmd.
+func (b *Builder) buildCmd() string {
+ if strings.Contains(b.name, "-race") {
+ return raceCmd
+ }
+ return *buildCmd
+}
+
+// buildOrBench checks for a new commit for this builder
+// and builds or benchmarks it if one is found.
+// It returns true if a build/benchmark was attempted.
+func (b *Builder) buildOrBench() bool {
+ var kinds []string
+ if *doBuild {
+ kinds = append(kinds, "build-go-commit")
+ }
+ if *doBench {
+ kinds = append(kinds, "benchmark-go-commit")
+ }
+ kind, hash, benchs, err := b.todo(kinds, "", "")
+ if err != nil {
+ log.Println(err)
+ return false
+ }
+ if hash == "" {
+ return false
+ }
+ switch kind {
+ case "build-go-commit":
+ if err := b.buildHash(hash); err != nil {
+ log.Println(err)
+ }
+ return true
+ case "benchmark-go-commit":
+ if err := b.benchHash(hash, benchs); err != nil {
+ log.Println(err)
+ }
+ return true
+ default:
+ log.Printf("Unknown todo kind %v", kind)
+ return false
+ }
+}
+
+func (b *Builder) buildHash(hash string) error {
+ log.Println(b.name, "building", hash)
+
+ // create place in which to do work
+ workpath := filepath.Join(*buildroot, b.name+"-"+hash[:12])
+ if err := os.Mkdir(workpath, mkdirPerm); err != nil {
+ if err2 := removePath(workpath); err2 != nil {
+ return err
+ }
+ if err := os.Mkdir(workpath, mkdirPerm); err != nil {
+ return err
+ }
+ }
+ defer removePath(workpath)
+
+ buildLog, runTime, err := b.buildRepoOnHash(workpath, hash, b.buildCmd())
+ if err != nil {
+ // record failure
+ return b.recordResult(false, "", hash, "", buildLog, runTime)
+ }
+
+ // record success
+ if err = b.recordResult(true, "", hash, "", "", runTime); err != nil {
+ return fmt.Errorf("recordResult: %s", err)
+ }
+
+ // build sub-repositories
+ goRoot := filepath.Join(workpath, *buildTool)
+ goPath := workpath
+ b.buildSubrepos(goRoot, goPath, hash)
+
+ return nil
+}
+
+// buildRepoOnHash clones repo into workpath and builds it.
+func (b *Builder) buildRepoOnHash(workpath, hash, cmd string) (buildLog string, runTime time.Duration, err error) {
+ // Delete the previous workdir, if necessary
+ // (benchmarking code can execute several benchmarks in the same workpath).
+ if b.lastWorkpath != "" {
+ if b.lastWorkpath == workpath {
+ panic("workpath already exists: " + workpath)
+ }
+ removePath(b.lastWorkpath)
+ b.lastWorkpath = ""
+ }
+
+ // pull before cloning to ensure we have the revision
+ if err = b.goroot.Pull(); err != nil {
+ buildLog = err.Error()
+ return
+ }
+
+ // set up builder's environment.
+ srcDir, err := b.env.setup(b.goroot, workpath, hash, b.envv())
+ if err != nil {
+ buildLog = err.Error()
+ return
+ }
+
+ // build
+ var buildbuf bytes.Buffer
+ logfile := filepath.Join(workpath, "build.log")
+ f, err := os.Create(logfile)
+ if err != nil {
+ return err.Error(), 0, err
+ }
+ defer f.Close()
+ w := io.MultiWriter(f, &buildbuf)
+
+ // go's build command is a script relative to the srcDir, whereas
+ // gccgo's build command is usually "make check-go" in the srcDir.
+ if *buildTool == "go" {
+ if !filepath.IsAbs(cmd) {
+ cmd = filepath.Join(srcDir, cmd)
+ }
+ }
+
+ // naive splitting of command from its arguments:
+ args := strings.Split(cmd, " ")
+ c := exec.Command(args[0], args[1:]...)
+ c.Dir = srcDir
+ c.Env = b.envv()
+ if *verbose {
+ c.Stdout = io.MultiWriter(os.Stdout, w)
+ c.Stderr = io.MultiWriter(os.Stderr, w)
+ } else {
+ c.Stdout = w
+ c.Stderr = w
+ }
+
+ startTime := time.Now()
+ err = run(c, runTimeout(*buildTimeout))
+ runTime = time.Since(startTime)
+ if err != nil {
+ fmt.Fprintf(w, "Build complete, duration %v. Result: error: %v\n", runTime, err)
+ } else {
+ fmt.Fprintf(w, "Build complete, duration %v. Result: success\n", runTime)
+ }
+ return buildbuf.String(), runTime, err
+}
+
+// failBuild checks for a new commit for this builder
+// and fails it if one is found.
+// It returns true if a build was "attempted".
+func (b *Builder) failBuild() bool {
+ _, hash, _, err := b.todo([]string{"build-go-commit"}, "", "")
+ if err != nil {
+ log.Println(err)
+ return false
+ }
+ if hash == "" {
+ return false
+ }
+
+ log.Printf("fail %s %s\n", b.name, hash)
+
+ if err := b.recordResult(false, "", hash, "", "auto-fail mode run by "+os.Getenv("USER"), 0); err != nil {
+ log.Print(err)
+ }
+ return true
+}
+
+func (b *Builder) buildSubrepos(goRoot, goPath, goHash string) {
+ for _, pkg := range dashboardPackages("subrepo") {
+ // get the latest todo for this package
+ _, hash, _, err := b.todo([]string{"build-package"}, pkg, goHash)
+ if err != nil {
+ log.Printf("buildSubrepos %s: %v", pkg, err)
+ continue
+ }
+ if hash == "" {
+ continue
+ }
+
+ // build the package
+ if *verbose {
+ log.Printf("buildSubrepos %s: building %q", pkg, hash)
+ }
+ buildLog, err := b.buildSubrepo(goRoot, goPath, pkg, hash)
+ if err != nil {
+ if buildLog == "" {
+ buildLog = err.Error()
+ }
+ log.Printf("buildSubrepos %s: %v", pkg, err)
+ }
+
+ // record the result
+ err = b.recordResult(err == nil, pkg, hash, goHash, buildLog, 0)
+ if err != nil {
+ log.Printf("buildSubrepos %s: %v", pkg, err)
+ }
+ }
+}
+
+// buildSubrepo fetches the given package, updates it to the specified hash,
+// and runs 'go test -short pkg/...'. It returns the build log and any error.
+func (b *Builder) buildSubrepo(goRoot, goPath, pkg, hash string) (string, error) {
+ goTool := filepath.Join(goRoot, "bin", "go") + exeExt
+ env := append(b.envv(), "GOROOT="+goRoot, "GOPATH="+goPath)
+
+ // add $GOROOT/bin and $GOPATH/bin to PATH
+ for i, e := range env {
+ const p = "PATH="
+ if !strings.HasPrefix(e, p) {
+ continue
+ }
+ sep := string(os.PathListSeparator)
+ env[i] = p + filepath.Join(goRoot, "bin") + sep + filepath.Join(goPath, "bin") + sep + e[len(p):]
+ }
+
+ // HACK: check out to new sub-repo location instead of old location.
+ pkg = strings.Replace(pkg, "code.google.com/p/go.", "golang.org/x/", 1)
+
+ // fetch package and dependencies
+ var outbuf bytes.Buffer
+ err := run(exec.Command(goTool, "get", "-d", pkg+"/..."), runEnv(env), allOutput(&outbuf), runDir(goPath))
+ if err != nil {
+ return outbuf.String(), err
+ }
+ outbuf.Reset()
+
+ // hg update to the specified hash
+ pkgmaster, err := vcs.RepoRootForImportPath(pkg, *verbose)
+ if err != nil {
+ return "", fmt.Errorf("Error finding subrepo (%s): %s", pkg, err)
+ }
+ repo := &Repo{
+ Path: filepath.Join(goPath, "src", pkg),
+ Master: pkgmaster,
+ }
+ if err := repo.UpdateTo(hash); err != nil {
+ return "", err
+ }
+
+ // test the package
+ err = run(exec.Command(goTool, "test", "-short", pkg+"/..."),
+ runTimeout(*buildTimeout), runEnv(env), allOutput(&outbuf), runDir(goPath))
+ return outbuf.String(), err
+}
+
+// repoForTool returns the correct RepoRoot for the buildTool, or an error if
+// the tool is unknown.
+func repoForTool() (*vcs.RepoRoot, error) {
+ switch *buildTool {
+ case "go":
+ return vcs.RepoRootForImportPath(*gcPath, *verbose)
+ case "gccgo":
+ return vcs.RepoRootForImportPath(gofrontendImportPath, *verbose)
+ default:
+ return nil, fmt.Errorf("unknown build tool: %s", *buildTool)
+ }
+}
+
+func isDirectory(name string) bool {
+ s, err := os.Stat(name)
+ return err == nil && s.IsDir()
+}
+
+func isFile(name string) bool {
+ s, err := os.Stat(name)
+ return err == nil && !s.IsDir()
+}
+
+// commitWatcher polls hg for new commits and tells the dashboard about them.
+func commitWatcher(goroot *Repo) {
+ if *commitInterval == 0 {
+ log.Printf("commitInterval is 0; disabling commitWatcher")
+ return
+ }
+ if !*report {
+ log.Printf("-report is false; disabling commitWatcher")
+ return
+ }
+ // Create builder just to get master key.
+ b, err := NewBuilder(goroot, "mercurial-commit")
+ if err != nil {
+ log.Fatal(err)
+ }
+ key := b.key
+
+ benchMutex.RLock()
+ for {
+ if *verbose {
+ log.Printf("poll...")
+ }
+ // Main Go repository.
+ commitPoll(goroot, "", key)
+ // Go sub-repositories.
+ for _, pkg := range dashboardPackages("subrepo") {
+ pkgmaster, err := vcs.RepoRootForImportPath(pkg, *verbose)
+ if err != nil {
+ log.Printf("Error finding subrepo (%s): %s", pkg, err)
+ continue
+ }
+ pkgroot := &Repo{
+ Path: filepath.Join(*buildroot, pkg),
+ Master: pkgmaster,
+ }
+ commitPoll(pkgroot, pkg, key)
+ }
+ benchMutex.RUnlock()
+ if *verbose {
+ log.Printf("sleep...")
+ }
+ time.Sleep(*commitInterval)
+ benchMutex.RLock()
+ }
+}
+
+// logByHash is a cache of all Mercurial revisions we know about,
+// indexed by full hash.
+var logByHash = map[string]*HgLog{}
+
+// commitPoll pulls any new revisions from the hg server
+// and tells the server about them.
+func commitPoll(repo *Repo, pkg, key string) {
+ pkgPath := filepath.Join(*buildroot, repo.Master.Root)
+ if !repo.Exists() {
+ var err error
+ repo, err = RemoteRepo(pkg, pkgPath)
+ if err != nil {
+ log.Printf("Error cloning package (%s): %s", pkg, err)
+ return
+ }
+
+ path := repo.Path
+ repo, err = repo.Clone(path, "tip")
+ if err != nil {
+ log.Printf("%s: hg clone failed: %v", pkg, err)
+ if err := os.RemoveAll(path); err != nil {
+ log.Printf("%s: %v", pkg, err)
+ }
+ }
+ return
+ }
+
+ logs, err := repo.Log() // repo.Log calls repo.Pull internally
+ if err != nil {
+ log.Printf("hg log: %v", err)
+ return
+ }
+
+ // Pass 1. Fill in parents and add new log entries to logByHash.
+ // Empty parent means take parent from next log entry.
+ // Non-empty parent has form 1234:hashhashhash; we want full hash.
+ for i := range logs {
+ l := &logs[i]
+ if l.Parent == "" && i+1 < len(logs) {
+ l.Parent = logs[i+1].Hash
+ } else if l.Parent != "" {
+ l.Parent, _ = repo.FullHash(l.Parent)
+ }
+ if *verbose {
+ log.Printf("hg log %s: %s < %s\n", pkg, l.Hash, l.Parent)
+ }
+ if logByHash[l.Hash] == nil {
+ l.bench = needsBenchmarking(l)
+ // These fields are needed only for needsBenchmarking, do not waste memory.
+ l.Branch = ""
+ l.Files = ""
+ // Make copy to avoid pinning entire slice when only one entry is new.
+ t := *l
+ logByHash[t.Hash] = &t
+ }
+ }
+
+ for _, l := range logs {
+ addCommit(pkg, l.Hash, key)
+ }
+}
+
+// addCommit adds the commit with the named hash to the dashboard.
+// key is the secret key for authentication to the dashboard.
+// It avoids duplicate effort.
+func addCommit(pkg, hash, key string) bool {
+ l := logByHash[hash]
+ if l == nil {
+ return false
+ }
+ if l.added {
+ return true
+ }
+
+ // Check for already added, perhaps in an earlier run.
+ if dashboardCommit(pkg, hash) {
+ log.Printf("%s already on dashboard\n", hash)
+ // Record that this hash is on the dashboard,
+ // as must be all its parents.
+ for l != nil {
+ l.added = true
+ l = logByHash[l.Parent]
+ }
+ return true
+ }
+
+ // Create parent first, to maintain some semblance of order.
+ if l.Parent != "" {
+ if !addCommit(pkg, l.Parent, key) {
+ return false
+ }
+ }
+
+ // Create commit.
+ if err := postCommit(key, pkg, l); err != nil {
+ log.Printf("failed to add %s to dashboard: %v", hash, err)
+ return false
+ }
+ l.added = true
+ return true
+}
+
+// defaultSuffix returns file extension used for command files in
+// current os environment.
+func defaultSuffix() string {
+ switch runtime.GOOS {
+ case "windows":
+ return ".bat"
+ case "plan9":
+ return ".rc"
+ default:
+ return ".bash"
+ }
+}
+
+func defaultExeExt() string {
+ switch runtime.GOOS {
+ case "windows":
+ return ".exe"
+ default:
+ return ""
+ }
+}
+
+// defaultBuildRoot returns default buildroot directory.
+func defaultBuildRoot() string {
+ var d string
+ if runtime.GOOS == "windows" {
+ // Use c:\, otherwise absolute paths become too long
+ // during a builder run; see http://golang.org/issue/3358.
+ d = `c:\`
+ } else {
+ d = os.TempDir()
+ }
+ return filepath.Join(d, "gobuilder")
+}
+
+// removePath is a more robust version of os.RemoveAll.
+// On Windows, if removal fails (which can happen if a test/benchmark times out
+// and keeps some files open), it tries to rename the dir instead.
+func removePath(path string) error {
+ if err := os.RemoveAll(path); err != nil {
+ if runtime.GOOS == "windows" {
+ err = os.Rename(path, filepath.Clean(path)+"_remove_me")
+ }
+ return err
+ }
+ return nil
+}
diff --git a/llgo/third_party/go.tools/dashboard/builder/vcs.go b/llgo/third_party/go.tools/dashboard/builder/vcs.go
new file mode 100644
index 0000000000000000000000000000000000000000..e7bfc7b681d710a3e4b7211178ae0b1a62d029d2
--- /dev/null
+++ b/llgo/third_party/go.tools/dashboard/builder/vcs.go
@@ -0,0 +1,212 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "bytes"
+ "encoding/xml"
+ "fmt"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+ "sync"
+
+ "llvm.org/llgo/third_party/go.tools/go/vcs"
+)
+
+// Repo represents a mercurial repository.
+type Repo struct {
+ Path string
+ Master *vcs.RepoRoot
+ sync.Mutex
+}
+
+// RemoteRepo constructs a *Repo representing a remote repository.
+func RemoteRepo(url, path string) (*Repo, error) {
+ rr, err := vcs.RepoRootForImportPath(url, *verbose)
+ if err != nil {
+ return nil, err
+ }
+ return &Repo{
+ Path: path,
+ Master: rr,
+ }, nil
+}
+
+// Clone clones the current Repo to a new destination
+// returning a new *Repo if successful.
+func (r *Repo) Clone(path, rev string) (*Repo, error) {
+ r.Lock()
+ defer r.Unlock()
+
+ err := timeout(*cmdTimeout, func() error {
+ downloadPath := r.Path
+ if !r.Exists() {
+ downloadPath = r.Master.Repo
+ }
+
+ err := r.Master.VCS.CreateAtRev(path, downloadPath, rev)
+ if err != nil {
+ return err
+ }
+ return r.Master.VCS.TagSync(path, "")
+ })
+ if err != nil {
+ return nil, err
+ }
+ return &Repo{
+ Path: path,
+ Master: r.Master,
+ }, nil
+}
+
+// Export exports the current Repo at revision rev to a new destination.
+func (r *Repo) Export(path, rev string) error {
+ r.Lock()
+ defer r.Unlock()
+
+ downloadPath := r.Path
+ if !r.Exists() {
+ _, err := r.Clone(path, rev)
+ return err
+ }
+
+ cmd := exec.Command(r.Master.VCS.Cmd, "archive", "-t", "files", "-r", rev, path)
+ cmd.Dir = downloadPath
+ if err := run(cmd); err != nil {
+ return fmt.Errorf("executing %v: %v", cmd.Args, err)
+ }
+ return nil
+}
+
+// UpdateTo updates the working copy of this Repo to the
+// supplied revision.
+func (r *Repo) UpdateTo(hash string) error {
+ r.Lock()
+ defer r.Unlock()
+
+ return timeout(*cmdTimeout, func() error {
+ return r.Master.VCS.TagSync(r.Path, hash)
+ })
+}
+
+// Exists reports whether this Repo represents a valid Mercurial repository.
+func (r *Repo) Exists() bool {
+ fi, err := os.Stat(filepath.Join(r.Path, "."+r.Master.VCS.Cmd))
+ if err != nil {
+ return false
+ }
+ return fi.IsDir()
+}
+
+// Pull pulls changes from the default path, that is, the path
+// this Repo was cloned from.
+func (r *Repo) Pull() error {
+ r.Lock()
+ defer r.Unlock()
+
+ return timeout(*cmdTimeout, func() error {
+ return r.Master.VCS.Download(r.Path)
+ })
+}
+
+// Log returns the changelog for this repository.
+func (r *Repo) Log() ([]HgLog, error) {
+ if err := r.Pull(); err != nil {
+ return nil, err
+ }
+ r.Lock()
+ defer r.Unlock()
+
+ var logStruct struct {
+ Log []HgLog
+ }
+ err := timeout(*cmdTimeout, func() error {
+ data, err := r.Master.VCS.Log(r.Path, xmlLogTemplate)
+ if err != nil {
+ return err
+ }
+
+ // Some commit descriptions contain a 0x1b (escape) byte.
+ // Mercurial does not escape it, but xml.Unmarshal rejects it.
+ data = bytes.Replace(data, []byte{0x1b}, []byte{'?'}, -1)
+
+ err = xml.Unmarshal([]byte("<Top>"+string(data)+"</Top>"), &logStruct)
+ if err != nil {
+ return fmt.Errorf("unmarshal %s log: %v", r.Master.VCS, err)
+ }
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ for i, log := range logStruct.Log {
+ // Let's pretend there can be only one parent.
+ if log.Parent != "" && strings.Contains(log.Parent, " ") {
+ logStruct.Log[i].Parent = strings.Split(log.Parent, " ")[0]
+ }
+ }
+ return logStruct.Log, nil
+}
+
+// FullHash returns the full hash for the given Mercurial revision.
+func (r *Repo) FullHash(rev string) (string, error) {
+ r.Lock()
+ defer r.Unlock()
+
+ var hash string
+ err := timeout(*cmdTimeout, func() error {
+ data, err := r.Master.VCS.LogAtRev(r.Path, rev, "{node}")
+ if err != nil {
+ return err
+ }
+
+ s := strings.TrimSpace(string(data))
+ if s == "" {
+ return fmt.Errorf("cannot find revision")
+ }
+ if len(s) != 40 {
+ return fmt.Errorf("%s returned invalid hash: %s", r.Master.VCS, s)
+ }
+ hash = s
+ return nil
+ })
+ if err != nil {
+ return "", err
+ }
+ return hash, nil
+}
+
+// HgLog represents a single Mercurial revision.
+type HgLog struct {
+ Hash string
+ Author string
+ Date string
+ Desc string
+ Parent string
+ Branch string
+ Files string
+
+ // Internal metadata
+ added bool
+ bench bool // needs to be benchmarked?
+}
+
+// xmlLogTemplate is a template to pass to Mercurial to make
+// hg log print the log in valid XML for parsing with xml.Unmarshal.
+// We cannot escape branches and files, because doing so crashes Python with:
+// AttributeError: 'NoneType' object has no attribute 'replace'
+const xmlLogTemplate = `
+ <Log>
+ <Hash>{node|escape}</Hash>
+ <Parent>{p1node}</Parent>
+ <Author>{author|escape}</Author>
+ <Date>{date|rfc3339date}</Date>
+ <Desc>{desc|escape}</Desc>
+ <Branch>{branches}</Branch>
+ <Files>{files}</Files>
+ </Log>
+`
diff --git a/llgo/third_party/go.tools/dashboard/coordinator/Makefile b/llgo/third_party/go.tools/dashboard/coordinator/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..ec1d88b531ad2ec8c705058b6be7bdd76f51370b
--- /dev/null
+++ b/llgo/third_party/go.tools/dashboard/coordinator/Makefile
@@ -0,0 +1,6 @@
+coordinator: main.go
+ GOOS=linux go build -o coordinator .
+
+upload: coordinator
+ cat coordinator | (cd buildongce && go run create.go --write_object=go-builder-data/coordinator)
+
diff --git a/llgo/third_party/go.tools/dashboard/coordinator/buildongce/create.go b/llgo/third_party/go.tools/dashboard/coordinator/buildongce/create.go
new file mode 100644
index 0000000000000000000000000000000000000000..d4f0cd679f66104e91c6e82d24bab4f802e7f744
--- /dev/null
+++ b/llgo/third_party/go.tools/dashboard/coordinator/buildongce/create.go
@@ -0,0 +1,306 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "bufio"
+ "bytes"
+ "encoding/json"
+ "flag"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "net/http"
+ "os"
+ "strings"
+ "time"
+
+ "code.google.com/p/goauth2/oauth"
+ compute "code.google.com/p/google-api-go-client/compute/v1"
+)
+
+var (
+ proj = flag.String("project", "symbolic-datum-552", "name of Project")
+ zone = flag.String("zone", "us-central1-a", "GCE zone")
+ mach = flag.String("machinetype", "n1-standard-16", "Machine type")
+ instName = flag.String("instance_name", "go-builder-1", "Name of VM instance.")
+ sshPub = flag.String("ssh_public_key", "", "ssh public key file to authorize. Can modify later in Google's web UI anyway.")
+ staticIP = flag.String("static_ip", "", "Static IP to use. If empty, automatic.")
+ reuseDisk = flag.Bool("reuse_disk", true, "Whether disk images should be reused between shutdowns/restarts.")
+
+ writeObject = flag.String("write_object", "", "If non-empty, a VM isn't created; instead the flag value names the Google Cloud Storage bucket/object to write. The contents are read from stdin.")
+)
+
+func readFile(v string) string {
+ slurp, err := ioutil.ReadFile(v)
+ if err != nil {
+ log.Fatalf("Error reading %s: %v", v, err)
+ }
+ return strings.TrimSpace(string(slurp))
+}
+
+var config = &oauth.Config{
+ // The client-id and secret should be for an "Installed Application" when using
+ // the CLI. Later we'll use a web application with a callback.
+ ClientId: readFile("client-id.dat"),
+ ClientSecret: readFile("client-secret.dat"),
+ Scope: strings.Join([]string{
+ compute.DevstorageFull_controlScope,
+ compute.ComputeScope,
+ "https://www.googleapis.com/auth/sqlservice",
+ "https://www.googleapis.com/auth/sqlservice.admin",
+ }, " "),
+ AuthURL: "https://accounts.google.com/o/oauth2/auth",
+ TokenURL: "https://accounts.google.com/o/oauth2/token",
+ RedirectURL: "urn:ietf:wg:oauth:2.0:oob",
+}
+
+const baseConfig = `#cloud-config
+coreos:
+ units:
+ - name: gobuild.service
+ command: start
+ content: |
+ [Unit]
+ Description=Go Builders
+ After=docker.service
+ Requires=docker.service
+
+ [Service]
+ ExecStartPre=/bin/bash -c 'mkdir -p /opt/bin && curl -s -o /opt/bin/coordinator http://storage.googleapis.com/go-builder-data/coordinator && chmod +x /opt/bin/coordinator'
+ ExecStart=/opt/bin/coordinator
+ RestartSec=10s
+ Restart=always
+ Type=simple
+
+ [Install]
+ WantedBy=multi-user.target
+`
+
+func main() {
+ flag.Parse()
+ if *proj == "" {
+ log.Fatalf("Missing --project flag")
+ }
+ prefix := "https://www.googleapis.com/compute/v1/projects/" + *proj
+ machType := prefix + "/zones/" + *zone + "/machineTypes/" + *mach
+
+ tr := &oauth.Transport{
+ Config: config,
+ }
+
+ tokenCache := oauth.CacheFile("token.dat")
+ token, err := tokenCache.Token()
+ if err != nil {
+ if *writeObject != "" {
+ log.Fatalf("Can't use --write_object without a valid token.dat file already cached.")
+ }
+ log.Printf("Error getting token from %s: %v", string(tokenCache), err)
+ log.Printf("Get auth code from %v", config.AuthCodeURL("my-state"))
+ fmt.Print("\nEnter auth code: ")
+ sc := bufio.NewScanner(os.Stdin)
+ sc.Scan()
+ authCode := strings.TrimSpace(sc.Text())
+ token, err = tr.Exchange(authCode)
+ if err != nil {
+ log.Fatalf("Error exchanging auth code for a token: %v", err)
+ }
+ tokenCache.PutToken(token)
+ }
+
+ tr.Token = token
+ oauthClient := &http.Client{Transport: tr}
+ if *writeObject != "" {
+ writeCloudStorageObject(oauthClient)
+ return
+ }
+
+ computeService, _ := compute.New(oauthClient)
+
+ natIP := *staticIP
+ if natIP == "" {
+ // Try to find it by name.
+ aggAddrList, err := computeService.Addresses.AggregatedList(*proj).Do()
+ if err != nil {
+ log.Fatal(err)
+ }
+ // http://godoc.org/code.google.com/p/google-api-go-client/compute/v1#AddressAggregatedList
+ IPLoop:
+ for _, asl := range aggAddrList.Items {
+ for _, addr := range asl.Addresses {
+ if addr.Name == *instName+"-ip" && addr.Status == "RESERVED" {
+ natIP = addr.Address
+ break IPLoop
+ }
+ }
+ }
+ }
+
+ cloudConfig := baseConfig
+ if *sshPub != "" {
+ key := strings.TrimSpace(readFile(*sshPub))
+ cloudConfig += fmt.Sprintf("\nssh_authorized_keys:\n - %s\n", key)
+ }
+ if os.Getenv("USER") == "bradfitz" {
+ cloudConfig += fmt.Sprintf("\nssh_authorized_keys:\n - %s\n", "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAIEAwks9dwWKlRC+73gRbvYtVg0vdCwDSuIlyt4z6xa/YU/jTDynM4R4W10hm2tPjy8iR1k8XhDv4/qdxe6m07NjG/By1tkmGpm1mGwho4Pr5kbAAy/Qg+NLCSdAYnnE00FQEcFOC15GFVMOW2AzDGKisReohwH9eIzHPzdYQNPRWXE= bradfitz@papag.bradfitz.com")
+ }
+ const maxCloudConfig = 32 << 10 // per compute API docs
+ if len(cloudConfig) > maxCloudConfig {
+ log.Fatalf("cloud config length of %d bytes is over %d byte limit", len(cloudConfig), maxCloudConfig)
+ }
+
+ instance := &compute.Instance{
+ Name: *instName,
+ Description: "Go Builder",
+ MachineType: machType,
+ Disks: []*compute.AttachedDisk{instanceDisk(computeService)},
+ Tags: &compute.Tags{
+ Items: []string{"http-server", "https-server"},
+ },
+ Metadata: &compute.Metadata{
+ Items: []*compute.MetadataItems{
+ {
+ Key: "user-data",
+ Value: cloudConfig,
+ },
+ },
+ },
+ NetworkInterfaces: []*compute.NetworkInterface{
+ &compute.NetworkInterface{
+ AccessConfigs: []*compute.AccessConfig{
+ &compute.AccessConfig{
+ Type: "ONE_TO_ONE_NAT",
+ Name: "External NAT",
+ NatIP: natIP,
+ },
+ },
+ Network: prefix + "/global/networks/default",
+ },
+ },
+ ServiceAccounts: []*compute.ServiceAccount{
+ {
+ Email: "default",
+ Scopes: []string{
+ compute.DevstorageFull_controlScope,
+ compute.ComputeScope,
+ },
+ },
+ },
+ }
+
+ log.Printf("Creating instance...")
+ op, err := computeService.Instances.Insert(*proj, *zone, instance).Do()
+ if err != nil {
+ log.Fatalf("Failed to create instance: %v", err)
+ }
+ opName := op.Name
+ log.Printf("Created. Waiting on operation %v", opName)
+OpLoop:
+ for {
+ time.Sleep(2 * time.Second)
+ op, err := computeService.ZoneOperations.Get(*proj, *zone, opName).Do()
+ if err != nil {
+ log.Fatalf("Failed to get op %s: %v", opName, err)
+ }
+ switch op.Status {
+ case "PENDING", "RUNNING":
+ log.Printf("Waiting on operation %v", opName)
+ continue
+ case "DONE":
+ if op.Error != nil {
+ for _, operr := range op.Error.Errors {
+ log.Printf("Error: %+v", operr)
+ }
+ log.Fatalf("Failed to start.")
+ }
+ log.Printf("Success. %+v", op)
+ break OpLoop
+ default:
+ log.Fatalf("Unknown status %q: %+v", op.Status, op)
+ }
+ }
+
+ inst, err := computeService.Instances.Get(*proj, *zone, *instName).Do()
+ if err != nil {
+ log.Fatalf("Error getting instance after creation: %v", err)
+ }
+ ij, _ := json.MarshalIndent(inst, "", " ")
+ log.Printf("Instance: %s", ij)
+}
+
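+// instanceDisk returns the boot disk to attach to the new VM.
+// With --reuse_disk it reuses an existing persistent disk with the expected
+// name if one is found; otherwise it creates a new disk from the CoreOS image.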
+func instanceDisk(svc *compute.Service) *compute.AttachedDisk {
+ const imageURL = "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-alpha-402-2-0-v20140807"
+ diskName := *instName + "-coreos-stateless-pd"
+
+ if *reuseDisk {
+ dl, err := svc.Disks.List(*proj, *zone).Do()
+ if err != nil {
+ log.Fatalf("Error listing disks: %v", err)
+ }
+ for _, disk := range dl.Items {
+ if disk.Name != diskName {
+ continue
+ }
+ return &compute.AttachedDisk{
+ AutoDelete: false,
+ Boot: true,
+ DeviceName: diskName,
+ Type: "PERSISTENT",
+ Source: disk.SelfLink,
+ Mode: "READ_WRITE",
+
+ // The GCP web UI's "Show REST API" link includes a
+ // "zone" parameter, but it's not in the API
+ // description. But it wants this form (disk.Zone, a
+ // full zone URL, not *zone):
+ // Zone: disk.Zone,
+ // ... but it seems to work without it. Keep this
+ // comment here until I file a bug with the GCP
+ // people.
+ }
+ }
+ }
+
+ return &compute.AttachedDisk{
+ AutoDelete: !*reuseDisk,
+ Boot: true,
+ Type: "PERSISTENT",
+ InitializeParams: &compute.AttachedDiskInitializeParams{
+ DiskName: diskName,
+ SourceImage: imageURL,
+ DiskSizeGb: 50,
+ },
+ }
+}
+
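+// writeCloudStorageObject uploads stdin to the Google Cloud Storage
+// bucket/object named by --write_object and makes it publicly readable.
+// The content type is sniffed from the first 1 MB of input.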
+func writeCloudStorageObject(httpClient *http.Client) {
+ content := os.Stdin
+ const maxSlurp = 1 << 20
+ var buf bytes.Buffer
+ n, err := io.CopyN(&buf, content, maxSlurp)
+ if err != nil && err != io.EOF {
+ log.Fatalf("Error reading from stdin: %v, %v", n, err)
+ }
+ contentType := http.DetectContentType(buf.Bytes())
+
+ req, err := http.NewRequest("PUT", "https://storage.googleapis.com/"+*writeObject, io.MultiReader(&buf, content))
+ if err != nil {
+ log.Fatal(err)
+ }
+ req.Header.Set("x-goog-api-version", "2")
+ req.Header.Set("x-goog-acl", "public-read")
+ req.Header.Set("Content-Type", contentType)
+ res, err := httpClient.Do(req)
+ if err != nil {
+ log.Fatal(err)
+ }
+ if res.StatusCode != 200 {
+ res.Write(os.Stderr)
+ log.Fatalf("Failed.")
+ }
+ log.Printf("Success.")
+ os.Exit(0)
+}
diff --git a/llgo/third_party/go.tools/dashboard/coordinator/main.go b/llgo/third_party/go.tools/dashboard/coordinator/main.go
new file mode 100644
index 0000000000000000000000000000000000000000..a8550001ada1804974083113ea0c4e887f82dfea
--- /dev/null
+++ b/llgo/third_party/go.tools/dashboard/coordinator/main.go
@@ -0,0 +1,458 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// The coordinator runs on GCE and coordinates builds in Docker containers.
+package main
+
+import (
+ "bytes"
+ "crypto/hmac"
+ "crypto/md5"
+ "encoding/json"
+ "flag"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "net/http"
+ "os"
+ "os/exec"
+ "sort"
+ "strings"
+ "sync"
+ "time"
+)
+
+var (
+ masterKeyFile = flag.String("masterkey", "", "Path to builder master key. Else fetched using GCE project attribute 'builder-master-key'.")
+ maxBuilds = flag.Int("maxbuilds", 6, "Max concurrent builds")
+
+ // Debug flags:
+ addTemp = flag.Bool("temp", false, "Append -temp to all builders.")
+ just = flag.String("just", "", "If non-empty, run single build in the foreground. Requires rev.")
+ rev = flag.String("rev", "", "Revision to build.")
+)
+
+var (
+ startTime = time.Now()
+ builders = map[string]buildConfig{} // populated once at startup
+ donec = make(chan builderRev) // reports of finished builders
+
+ statusMu sync.Mutex
+ status = map[builderRev]*buildStatus{}
+)
+
+type imageInfo struct {
+ url string // of tar file
+
+ mu sync.Mutex
+ lastMod string
+}
+
+var images = map[string]*imageInfo{
+ "gobuilders/linux-x86-base": {url: "https://storage.googleapis.com/go-builder-data/docker-linux.base.tar.gz"},
+ "gobuilders/linux-x86-clang": {url: "https://storage.googleapis.com/go-builder-data/docker-linux.clang.tar.gz"},
+ "gobuilders/linux-x86-gccgo": {url: "https://storage.googleapis.com/go-builder-data/docker-linux.gccgo.tar.gz"},
+ "gobuilders/linux-x86-nacl": {url: "https://storage.googleapis.com/go-builder-data/docker-linux.nacl.tar.gz"},
+ "gobuilders/linux-x86-sid": {url: "https://storage.googleapis.com/go-builder-data/docker-linux.sid.tar.gz"},
+}
+
+type buildConfig struct {
+ name string // "linux-amd64-race"
+ image string // Docker image to use to build
+ cmd string // optional -cmd flag (relative to go/src/)
+ env []string // extra environment ("key=value") pairs
+ dashURL string // url of the build dashboard
+ tool string // the tool this configuration is for
+}
+
+func main() {
+ flag.Parse()
+ addBuilder(buildConfig{name: "linux-386"})
+ addBuilder(buildConfig{name: "linux-386-387", env: []string{"GO386=387"}})
+ addBuilder(buildConfig{name: "linux-amd64"})
+ addBuilder(buildConfig{name: "linux-amd64-nocgo", env: []string{"CGO_ENABLED=0", "USER=root"}})
+ addBuilder(buildConfig{name: "linux-amd64-noopt", env: []string{"GO_GCFLAGS=-N -l"}})
+ addBuilder(buildConfig{name: "linux-amd64-race"})
+ addBuilder(buildConfig{name: "nacl-386"})
+ addBuilder(buildConfig{name: "nacl-amd64p32"})
+ addBuilder(buildConfig{
+ name: "linux-amd64-gccgo",
+ image: "gobuilders/linux-x86-gccgo",
+ cmd: "make RUNTESTFLAGS=\"--target_board=unix/-m64\" check-go -j16",
+ dashURL: "https://build.golang.org/gccgo",
+ tool: "gccgo",
+ })
+ addBuilder(buildConfig{
+ name: "linux-386-gccgo",
+ image: "gobuilders/linux-x86-gccgo",
+ cmd: "make RUNTESTFLAGS=\"--target_board=unix/-m32\" check-go -j16",
+ dashURL: "https://build.golang.org/gccgo",
+ tool: "gccgo",
+ })
+ addBuilder(buildConfig{name: "linux-386-sid", image: "gobuilders/linux-x86-sid"})
+ addBuilder(buildConfig{name: "linux-amd64-sid", image: "gobuilders/linux-x86-sid"})
+ addBuilder(buildConfig{name: "linux-386-clang", image: "gobuilders/linux-x86-clang"})
+ addBuilder(buildConfig{name: "linux-amd64-clang", image: "gobuilders/linux-x86-clang"})
+
+ if (*just != "") != (*rev != "") {
+ log.Fatalf("--just and --rev must be used together")
+ }
+ if *just != "" {
+ conf, ok := builders[*just]
+ if !ok {
+ log.Fatalf("unknown builder %q", *just)
+ }
+ cmd := exec.Command("docker", append([]string{"run"}, conf.dockerRunArgs(*rev)...)...)
+ cmd.Stdout = os.Stdout
+ cmd.Stderr = os.Stderr
+ if err := cmd.Run(); err != nil {
+ log.Fatalf("Build failed: %v", err)
+ }
+ return
+ }
+
+ http.HandleFunc("/", handleStatus)
+ http.HandleFunc("/logs", handleLogs)
+ go http.ListenAndServe(":80", nil)
+
+ workc := make(chan builderRev)
+ for name, builder := range builders {
+ go findWorkLoop(name, builder.dashURL, workc)
+ }
+
+ ticker := time.NewTicker(1 * time.Minute)
+ for {
+ select {
+ case work := <-workc:
+ log.Printf("workc received %+v; len(status) = %v, maxBuilds = %v; cur = %p", work, len(status), *maxBuilds, status[work])
+ mayBuild := mayBuildRev(work)
+ if mayBuild {
+ out, _ := exec.Command("docker", "ps").Output()
+ numBuilds := bytes.Count(out, []byte("\n")) - 1
+ log.Printf("num current docker builds: %d", numBuilds)
+ if numBuilds > *maxBuilds {
+ mayBuild = false
+ }
+ }
+ if mayBuild {
+ if st, err := startBuilding(builders[work.name], work.rev); err == nil {
+ setStatus(work, st)
+ log.Printf("%v now building in %v", work, st.container)
+ } else {
+ log.Printf("Error starting to build %v: %v", work, err)
+ }
+ }
+ case done := <-donec:
+ log.Printf("%v done", done)
+ setStatus(done, nil)
+ case <-ticker.C:
+ if numCurrentBuilds() == 0 && time.Now().After(startTime.Add(10*time.Minute)) {
+ // TODO: halt the whole machine to kill the VM or something
+ }
+ }
+ }
+}
+
+func numCurrentBuilds() int {
+ statusMu.Lock()
+ defer statusMu.Unlock()
+ return len(status)
+}
+
+func mayBuildRev(work builderRev) bool {
+ statusMu.Lock()
+ defer statusMu.Unlock()
+ return len(status) < *maxBuilds && status[work] == nil
+}
+
+func setStatus(work builderRev, st *buildStatus) {
+ statusMu.Lock()
+ defer statusMu.Unlock()
+ if st == nil {
+ delete(status, work)
+ } else {
+ status[work] = st
+ }
+}
+
+func getStatus(work builderRev) *buildStatus {
+ statusMu.Lock()
+ defer statusMu.Unlock()
+ return status[work]
+}
+
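+// byAge sorts build statuses by start time, oldest first.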
+type byAge []*buildStatus
+
+func (s byAge) Len() int { return len(s) }
+func (s byAge) Less(i, j int) bool { return s[i].start.Before(s[j].start) }
+func (s byAge) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+func handleStatus(w http.ResponseWriter, r *http.Request) {
+ var active []*buildStatus
+ statusMu.Lock()
+ for _, st := range status {
+ active = append(active, st)
+ }
+ statusMu.Unlock()
+
+ fmt.Fprintf(w, "Go build coordinator %d of max %d builds running:
", len(status), *maxBuilds)
+ sort.Sort(byAge(active))
+ for _, st := range active {
+ fmt.Fprintf(w, "%-22s hg %s in container %s , %v ago\n", st.name, st.rev, st.name, st.rev,
+ st.container, time.Now().Sub(st.start))
+ }
+ fmt.Fprintf(w, " ")
+}
+
+func handleLogs(w http.ResponseWriter, r *http.Request) {
+ st := getStatus(builderRev{r.FormValue("name"), r.FormValue("rev")})
+ if st == nil {
+ fmt.Fprintf(w, "not building ")
+ return
+ }
+ out, err := exec.Command("docker", "logs", st.container).CombinedOutput()
+ if err != nil {
+ log.Print(err)
+ http.Error(w, "Error fetching logs. Already finished?", 500)
+ return
+ }
+ key := builderKey(st.name)
+ logs := strings.Replace(string(out), key, "BUILDERKEY", -1)
+ w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+ io.WriteString(w, logs)
+}
+
+func findWorkLoop(builderName, dashURL string, work chan<- builderRev) {
+ // TODO: make this better
+ for {
+ rev, err := findWork(builderName, dashURL)
+ if err != nil {
+ log.Printf("Finding work for %s: %v", builderName, err)
+ } else if rev != "" {
+ work <- builderRev{builderName, rev}
+ }
+ time.Sleep(60 * time.Second)
+ }
+}
+
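+// findWork asks the dashboard's /todo endpoint for a Go commit that
+// builderName should build, returning an empty rev if there is nothing to do.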
+func findWork(builderName, dashURL string) (rev string, err error) {
+ var jres struct {
+ Response struct {
+ Kind string
+ Data struct {
+ Hash string
+ PerfResults []string
+ }
+ }
+ }
+ res, err := http.Get(dashURL + "/todo?builder=" + builderName + "&kind=build-go-commit")
+ if err != nil {
+ return
+ }
+ defer res.Body.Close()
+ if res.StatusCode != 200 {
+ return "", fmt.Errorf("unexpected http status %d", res.StatusCode)
+ }
+ err = json.NewDecoder(res.Body).Decode(&jres)
+ if jres.Response.Kind == "build-go-commit" {
+ rev = jres.Response.Data.Hash
+ }
+ return rev, err
+}
+
+type builderRev struct {
+ name, rev string
+}
+
+// dockerRunArgs returns the arguments that follow "docker run" when
+// starting a build container for the given revision.
+func (conf buildConfig) dockerRunArgs(rev string) (args []string) {
+ if key := builderKey(conf.name); key != "" {
+ tmpKey := "/tmp/" + conf.name + ".buildkey"
+ if _, err := os.Stat(tmpKey); err != nil {
+ if err := ioutil.WriteFile(tmpKey, []byte(key), 0600); err != nil {
+ log.Fatal(err)
+ }
+ }
+ // Images may look for .gobuildkey in / or /root, so provide both.
+ // TODO(adg): fix images that look in the wrong place.
+ args = append(args, "-v", tmpKey+":/.gobuildkey")
+ args = append(args, "-v", tmpKey+":/root/.gobuildkey")
+ }
+ for _, pair := range conf.env {
+ args = append(args, "-e", pair)
+ }
+ args = append(args,
+ conf.image,
+ "/usr/local/bin/builder",
+ "-rev="+rev,
+ "-dashboard="+conf.dashURL,
+ "-tool="+conf.tool,
+ "-buildroot=/",
+ "-v",
+ )
+ if conf.cmd != "" {
+ args = append(args, "-cmd", conf.cmd)
+ }
+ args = append(args, conf.name)
+ return
+}
+
+func addBuilder(c buildConfig) {
+ if c.name == "" {
+ panic("empty name")
+ }
+ if *addTemp {
+ c.name += "-temp"
+ }
+ if _, dup := builders[c.name]; dup {
+ panic("dup name")
+ }
+ if c.dashURL == "" {
+ c.dashURL = "https://build.golang.org"
+ }
+ if c.tool == "" {
+ c.tool = "go"
+ }
+
+ if strings.HasPrefix(c.name, "nacl-") {
+ if c.image == "" {
+ c.image = "gobuilders/linux-x86-nacl"
+ }
+ if c.cmd == "" {
+ c.cmd = "/usr/local/bin/build-command.pl"
+ }
+ }
+ if strings.HasPrefix(c.name, "linux-") && c.image == "" {
+ c.image = "gobuilders/linux-x86-base"
+ }
+ if c.image == "" {
+ panic("empty image")
+ }
+ builders[c.name] = c
+}
+
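+// condUpdateImage conditionally refreshes the named builder image:
+// it re-downloads the image tarball and pipes it into "docker load" only
+// when the Last-Modified header differs from the last one seen.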
+func condUpdateImage(img string) error {
+ ii := images[img]
+ if ii == nil {
+ log.Fatalf("Image %q not described.", img)
+ }
+ ii.mu.Lock()
+ defer ii.mu.Unlock()
+ res, err := http.Head(ii.url)
+ if err != nil {
+ return fmt.Errorf("Error checking %s: %v", ii.url, err)
+ }
+ if res.StatusCode != 200 {
+ return fmt.Errorf("Error checking %s: %v", ii.url, res.Status)
+ }
+ if res.Header.Get("Last-Modified") == ii.lastMod {
+ return nil
+ }
+
+ res, err = http.Get(ii.url)
+ if err != nil || res.StatusCode != 200 {
+ return fmt.Errorf("Get after Head failed for %s: %v, %v", ii.url, err, res)
+ }
+ defer res.Body.Close()
+
+ log.Printf("Running: docker load of %s\n", ii.url)
+ cmd := exec.Command("docker", "load")
+ cmd.Stdin = res.Body
+
+ var out bytes.Buffer
+ cmd.Stdout = &out
+ cmd.Stderr = &out
+
+ if err := cmd.Run(); err != nil {
+ log.Printf("Failed to pull latest %s from %s and pipe into docker load: %v, %s", img, ii.url, err, out.Bytes())
+ return err
+ }
+ ii.lastMod = res.Header.Get("Last-Modified")
+ return nil
+}
+
+func startBuilding(conf buildConfig, rev string) (*buildStatus, error) {
+ if err := condUpdateImage(conf.image); err != nil {
+ log.Printf("Failed to setup container for %v %v: %v", conf.name, rev, err)
+ return nil, err
+ }
+
+ cmd := exec.Command("docker", append([]string{"run", "-d"}, conf.dockerRunArgs(rev)...)...)
+ all, err := cmd.CombinedOutput()
+ log.Printf("Docker run for %v %v = err:%v, output:%s", conf.name, rev, err, all)
+ if err != nil {
+ return nil, err
+ }
+ container := strings.TrimSpace(string(all))
+ go func() {
+ all, err := exec.Command("docker", "wait", container).CombinedOutput()
+ log.Printf("docker wait %s/%s: %v, %s", container, rev, err, strings.TrimSpace(string(all)))
+ donec <- builderRev{conf.name, rev}
+ exec.Command("docker", "rm", container).Run()
+ }()
+ return &buildStatus{
+ builderRev: builderRev{
+ name: conf.name,
+ rev: rev,
+ },
+ container: container,
+ start: time.Now(),
+ }, nil
+}
+
+type buildStatus struct {
+ builderRev
+ container string
+ start time.Time
+
+ mu sync.Mutex
+ // ...
+}
+
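+// builderKey derives a per-builder secret as an HMAC-MD5 of the builder
+// name under the master key, so only the master key needs to be stored.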
+func builderKey(builder string) string {
+ master := masterKey()
+ if len(master) == 0 {
+ return ""
+ }
+ h := hmac.New(md5.New, master)
+ io.WriteString(h, builder)
+ return fmt.Sprintf("%x", h.Sum(nil))
+}
+
+func masterKey() []byte {
+ keyOnce.Do(loadKey)
+ return masterKeyCache
+}
+
+var (
+ keyOnce sync.Once
+ masterKeyCache []byte
+)
+
+func loadKey() {
+ if *masterKeyFile != "" {
+ b, err := ioutil.ReadFile(*masterKeyFile)
+ if err != nil {
+ log.Fatal(err)
+ }
+ masterKeyCache = bytes.TrimSpace(b)
+ return
+ }
+ req, _ := http.NewRequest("GET", "http://metadata.google.internal/computeMetadata/v1/project/attributes/builder-master-key", nil)
+ req.Header.Set("Metadata-Flavor", "Google")
+ res, err := http.DefaultClient.Do(req)
+ if err != nil {
+ log.Fatal("No builder master key available")
+ }
+ defer res.Body.Close()
+ if res.StatusCode != 200 {
+ log.Fatalf("No builder-master-key project attribute available.")
+ }
+ slurp, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ log.Fatal(err)
+ }
+ masterKeyCache = bytes.TrimSpace(slurp)
+}
diff --git a/llgo/third_party/go.tools/dashboard/env/linux-x86-base/Dockerfile b/llgo/third_party/go.tools/dashboard/env/linux-x86-base/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..37e1a081922de835b694701ac637c9c3b90d6f95
--- /dev/null
+++ b/llgo/third_party/go.tools/dashboard/env/linux-x86-base/Dockerfile
@@ -0,0 +1,16 @@
+# Copyright 2014 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+# Base builder image: gobuilders/linux-x86-base
+
+FROM debian:wheezy
+MAINTAINER golang-dev
+
+ENV DEBIAN_FRONTEND noninteractive
+
+ADD /scripts/install-apt-deps.sh /scripts/
+RUN /scripts/install-apt-deps.sh
+
+ADD /scripts/build-go-builder.sh /scripts/
+RUN GO_REV=8c27884843c3 BUILDER_REV=ced78bfefcb3 /scripts/build-go-builder.sh && test -f /usr/local/bin/builder
diff --git a/llgo/third_party/go.tools/dashboard/env/linux-x86-base/Makefile b/llgo/third_party/go.tools/dashboard/env/linux-x86-base/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..d94baf6d9e9b04906a55917f15efc7e45474527c
--- /dev/null
+++ b/llgo/third_party/go.tools/dashboard/env/linux-x86-base/Makefile
@@ -0,0 +1,12 @@
+# Copyright 2014 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+docker: Dockerfile
+ docker build -t gobuilders/linux-x86-base .
+
+docker-linux.base.tar.gz: docker
+ docker save gobuilders/linux-x86-base | gzip | (cd ../../coordinator/buildongce && go run create.go --write_object=go-builder-data/docker-linux.base.tar.gz)
+
+check: docker
+ docker run gobuilders/linux-x86-base /usr/local/bin/builder -rev=8c27884843c3 -buildroot=/ -v -report=false linux-amd64-temp
diff --git a/llgo/third_party/go.tools/dashboard/env/linux-x86-base/README b/llgo/third_party/go.tools/dashboard/env/linux-x86-base/README
new file mode 100644
index 0000000000000000000000000000000000000000..a5119090c6347a2018e89f25193e951a168718e8
--- /dev/null
+++ b/llgo/third_party/go.tools/dashboard/env/linux-x86-base/README
@@ -0,0 +1,11 @@
+For now, you can at least do a single build of a single revision:
+
+$ export BUILD=linux-amd64-temp
+$ docker run \
+ -v $HOME/keys/$BUILD.buildkey:/.gobuildkey \
+ gobuilders/linux-x86-base \
+ /usr/local/bin/builder -rev=50ac9eded6ad -buildroot=/ -v $BUILD
+
+TODO(bradfitz): automate with CoreOS + GCE, ala:
+ https://github.com/bradfitz/camlistore/blob/master/misc/gce/create.go
+
diff --git a/llgo/third_party/go.tools/dashboard/env/linux-x86-base/scripts/build-go-builder.sh b/llgo/third_party/go.tools/dashboard/env/linux-x86-base/scripts/build-go-builder.sh
new file mode 100755
index 0000000000000000000000000000000000000000..097ac171c184adcbeabf50dc1c01c73dee218cc7
--- /dev/null
+++ b/llgo/third_party/go.tools/dashboard/env/linux-x86-base/scripts/build-go-builder.sh
@@ -0,0 +1,20 @@
+set -ex
+
+export GOPATH=/gopath
+export GOROOT=/goroot
+PREFIX=/usr/local
+: ${GO_REV:?"need to be set to the golang repo revision used to build the builder."}
+: ${BUILDER_REV:?"need to be set to the go.tools repo revision for the builder."}
+
+mkdir -p $GOROOT
+curl -s https://storage.googleapis.com/gobuilder/go-snap.tar.gz | tar x --no-same-owner -zv -C $GOROOT
+(cd $GOROOT/src && hg pull -r $GO_REV -u && find && ./make.bash)
+
+GO_TOOLS=$GOPATH/src/golang.org/x/tools
+mkdir -p $GO_TOOLS
+curl -s https://storage.googleapis.com/gobuilder/go.tools-snap.tar.gz | tar x --no-same-owner -zv -C $GO_TOOLS
+
+mkdir -p $PREFIX/bin
+(cd $GO_TOOLS && hg pull -r $BUILDER_REV -u && GOBIN=$PREFIX/bin /goroot/bin/go install golang.org/x/tools/dashboard/builder)
+
+rm -fR $GOROOT/bin $GOROOT/pkg $GOPATH
diff --git a/llgo/third_party/go.tools/dashboard/env/linux-x86-base/scripts/install-apt-deps.sh b/llgo/third_party/go.tools/dashboard/env/linux-x86-base/scripts/install-apt-deps.sh
new file mode 100755
index 0000000000000000000000000000000000000000..839f4ad2fa02de0778ec13e500ae0b7a165f4d7b
--- /dev/null
+++ b/llgo/third_party/go.tools/dashboard/env/linux-x86-base/scripts/install-apt-deps.sh
@@ -0,0 +1,17 @@
+set -ex
+
+apt-get update
+# For running curl to get the hg starter tarballs (faster than hg clone).
+apt-get install -y --no-install-recommends curl ca-certificates
+# Optionally used by some net/http tests:
+apt-get install -y --no-install-recommends strace
+# For building Go's bootstrap 'dist' prog
+apt-get install -y --no-install-recommends gcc libc6-dev
+# For 32-bit builds:
+# TODO(bradfitz): move these into a 386 image that derives from this one.
+apt-get install -y --no-install-recommends libc6-dev-i386 gcc-multilib
+# For interacting with the Go source & subrepos:
+apt-get install -y --no-install-recommends mercurial git-core
+
+apt-get clean
+rm -fr /var/lib/apt/lists
diff --git a/llgo/third_party/go.tools/dashboard/env/linux-x86-clang/Dockerfile b/llgo/third_party/go.tools/dashboard/env/linux-x86-clang/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..02ef66e3482f7580286fbae11f2cad137c9689ca
--- /dev/null
+++ b/llgo/third_party/go.tools/dashboard/env/linux-x86-clang/Dockerfile
@@ -0,0 +1,20 @@
+# Copyright 2014 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+# gobuilders/linux-x86-clang for building with clang instead of gcc.
+
+FROM debian:wheezy
+MAINTAINER golang-dev
+
+ENV DEBIAN_FRONTEND noninteractive
+
+ADD /sources/clang-deps.list /etc/apt/sources.list.d/
+
+ADD /scripts/install-apt-deps.sh /scripts/
+RUN /scripts/install-apt-deps.sh
+
+ADD /scripts/build-go-builder.sh /scripts/
+RUN GO_REV=8c27884843c3 BUILDER_REV=ced78bfefcb3 /scripts/build-go-builder.sh && test -f /usr/local/bin/builder
+
+ENV CC /usr/bin/clang
diff --git a/llgo/third_party/go.tools/dashboard/env/linux-x86-clang/Makefile b/llgo/third_party/go.tools/dashboard/env/linux-x86-clang/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..5e1ed0ff914973444106344a0f94bc559000898d
--- /dev/null
+++ b/llgo/third_party/go.tools/dashboard/env/linux-x86-clang/Makefile
@@ -0,0 +1,15 @@
+# Copyright 2014 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+docker: Dockerfile
+ docker build -t gobuilders/linux-x86-clang .
+
+docker-linux.clang.tar.gz: docker
+ docker save gobuilders/linux-x86-clang | gzip | (cd ../../coordinator/buildongce && go run create.go --write_object=go-builder-data/docker-linux.clang.tar.gz)
+
+check: docker
+ docker run gobuilders/linux-x86-clang /usr/local/bin/builder -rev=8c27884843c3 -buildroot=/ -v -report=false linux-amd64-temp
+
+check32: docker
+ docker run gobuilders/linux-x86-clang /usr/local/bin/builder -rev=8c27884843c3 -buildroot=/ -v -report=false linux-386-temp
diff --git a/llgo/third_party/go.tools/dashboard/env/linux-x86-clang/scripts/build-go-builder.sh b/llgo/third_party/go.tools/dashboard/env/linux-x86-clang/scripts/build-go-builder.sh
new file mode 100755
index 0000000000000000000000000000000000000000..097ac171c184adcbeabf50dc1c01c73dee218cc7
--- /dev/null
+++ b/llgo/third_party/go.tools/dashboard/env/linux-x86-clang/scripts/build-go-builder.sh
@@ -0,0 +1,20 @@
+set -ex
+
+export GOPATH=/gopath
+export GOROOT=/goroot
+PREFIX=/usr/local
+: ${GO_REV:?"need to be set to the golang repo revision used to build the builder."}
+: ${BUILDER_REV:?"need to be set to the go.tools repo revision for the builder."}
+
+mkdir -p $GOROOT
+curl -s https://storage.googleapis.com/gobuilder/go-snap.tar.gz | tar x --no-same-owner -zv -C $GOROOT
+(cd $GOROOT/src && hg pull -r $GO_REV -u && find && ./make.bash)
+
+GO_TOOLS=$GOPATH/src/golang.org/x/tools
+mkdir -p $GO_TOOLS
+curl -s https://storage.googleapis.com/gobuilder/go.tools-snap.tar.gz | tar x --no-same-owner -zv -C $GO_TOOLS
+
+mkdir -p $PREFIX/bin
+(cd $GO_TOOLS && hg pull -r $BUILDER_REV -u && GOBIN=$PREFIX/bin /goroot/bin/go install golang.org/x/tools/dashboard/builder)
+
+rm -fR $GOROOT/bin $GOROOT/pkg $GOPATH
diff --git a/llgo/third_party/go.tools/dashboard/env/linux-x86-clang/scripts/install-apt-deps.sh b/llgo/third_party/go.tools/dashboard/env/linux-x86-clang/scripts/install-apt-deps.sh
new file mode 100755
index 0000000000000000000000000000000000000000..1382dd6a16c4b4225e7988c8635f1d7e6243ebea
--- /dev/null
+++ b/llgo/third_party/go.tools/dashboard/env/linux-x86-clang/scripts/install-apt-deps.sh
@@ -0,0 +1,21 @@
+set -ex
+
+apt-get update
+# For running curl to get the hg starter tarballs (faster than hg clone).
+apt-get install -y --no-install-recommends curl ca-certificates
+# Optionally used by some net/http tests:
+apt-get install -y --no-install-recommends strace
+# For building Go's bootstrap 'dist' prog
+apt-get install -y --no-install-recommends wget
+wget -O - http://llvm.org/apt/llvm-snapshot.gpg.key | apt-key add -
+apt-get update
+apt-get install -y --no-install-recommends clang-3.5
+# TODO(cmang): move these into a 386 image that derives from this one.
+apt-get install -y --no-install-recommends libc6-dev-i386 gcc-multilib
+# Remove gcc binary so it doesn't interfere with clang
+rm -f /usr/bin/gcc
+# For interacting with the Go source & subrepos:
+apt-get install -y --no-install-recommends mercurial git-core
+
+apt-get clean
+rm -fr /var/lib/apt/lists
diff --git a/llgo/third_party/go.tools/dashboard/env/linux-x86-clang/sources/clang-deps.list b/llgo/third_party/go.tools/dashboard/env/linux-x86-clang/sources/clang-deps.list
new file mode 100644
index 0000000000000000000000000000000000000000..eb3e244a666021881f2f23f7dd156da74683bc31
--- /dev/null
+++ b/llgo/third_party/go.tools/dashboard/env/linux-x86-clang/sources/clang-deps.list
@@ -0,0 +1,3 @@
+# The debian sources for stable clang builds, taken from http://llvm.org/apt/
+deb http://llvm.org/apt/wheezy/ llvm-toolchain-wheezy main
+deb-src http://llvm.org/apt/wheezy/ llvm-toolchain-wheezy main
\ No newline at end of file
diff --git a/llgo/third_party/go.tools/dashboard/env/linux-x86-gccgo/Dockerfile b/llgo/third_party/go.tools/dashboard/env/linux-x86-gccgo/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..2ccd0d936f6ee83fd271318feb67ebe296fd93c3
--- /dev/null
+++ b/llgo/third_party/go.tools/dashboard/env/linux-x86-gccgo/Dockerfile
@@ -0,0 +1,19 @@
+# Copyright 2014 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+# gobuilders/linux-x86-gccgo for 32- and 64-bit gccgo.
+
+FROM debian:wheezy
+MAINTAINER golang-dev
+
+ENV DEBIAN_FRONTEND noninteractive
+
+ADD /scripts/install-apt-deps.sh /scripts/
+RUN /scripts/install-apt-deps.sh
+
+ADD /scripts/install-gold.sh /scripts/
+RUN /scripts/install-gold.sh
+
+ADD /scripts/install-gccgo-builder.sh /scripts/
+RUN /scripts/install-gccgo-builder.sh && test -f /usr/local/bin/builder
\ No newline at end of file
diff --git a/llgo/third_party/go.tools/dashboard/env/linux-x86-gccgo/Makefile b/llgo/third_party/go.tools/dashboard/env/linux-x86-gccgo/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..9d5143fe23ba83a467c695b34e1174f44fb140b4
--- /dev/null
+++ b/llgo/third_party/go.tools/dashboard/env/linux-x86-gccgo/Makefile
@@ -0,0 +1,15 @@
+# Copyright 2014 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+docker: Dockerfile
+ docker build -t gobuilders/linux-x86-gccgo .
+
+docker-linux.gccgo.tar.gz: docker
+ docker save gobuilders/linux-x86-gccgo | gzip | (cd ../../coordinator/buildongce && go run create.go --write_object=go-builder-data/docker-linux.gccgo.tar.gz)
+
+check: docker
+ docker run gobuilders/linux-x86-gccgo /usr/local/bin/builder -tool="gccgo" -rev=b9151e911a54 -v -cmd='make RUNTESTFLAGS="--target_board=unix/-m64" check-go' -report=false linux-amd64-gccgo-temp
+
+check32: docker
+ docker run gobuilders/linux-x86-gccgo /usr/local/bin/builder -tool="gccgo" -rev=b9151e911a54 -v -cmd='make RUNTESTFLAGS="--target_board=unix/-m32" check-go' -report=false linux-386-gccgo-temp
diff --git a/llgo/third_party/go.tools/dashboard/env/linux-x86-gccgo/README b/llgo/third_party/go.tools/dashboard/env/linux-x86-gccgo/README
new file mode 100644
index 0000000000000000000000000000000000000000..65180bc59f170cd7c38eee278051a32b5043cc7d
--- /dev/null
+++ b/llgo/third_party/go.tools/dashboard/env/linux-x86-gccgo/README
@@ -0,0 +1,6 @@
+$ export BUILD=linux-amd64-gccgo
+$ export BUILDREV=b9151e911a54
+$ docker run \
+ -v $HOME/keys/$BUILD.buildkey:/.gobuildkey \
+ gobuilders/linux-x86-gccgo \
+ /usr/local/bin/builder -tool=gccgo -dashboard='https://build.golang.org/gccgo' -rev=$BUILDREV -buildroot=/gccgo -v -cmd='make check-go -kj' $BUILD
diff --git a/llgo/third_party/go.tools/dashboard/env/linux-x86-gccgo/scripts/install-apt-deps.sh b/llgo/third_party/go.tools/dashboard/env/linux-x86-gccgo/scripts/install-apt-deps.sh
new file mode 100755
index 0000000000000000000000000000000000000000..90dbac1043c44cdd3f0dd02e927883dad34ee7ad
--- /dev/null
+++ b/llgo/third_party/go.tools/dashboard/env/linux-x86-gccgo/scripts/install-apt-deps.sh
@@ -0,0 +1,20 @@
+set -ex
+
+apt-get update
+# For running curl to get the gccgo builder binary.
+apt-get install -y --no-install-recommends curl ca-certificates
+# Optionally used by some net/http tests:
+apt-get install -y --no-install-recommends strace
+# For using numeric libraries within GCC.
+apt-get install -y --no-install-recommends libgmp10-dev libmpc-dev libmpfr-dev
+# For building binutils and gcc from source.
+apt-get install -y --no-install-recommends make g++ flex bison
+# Same as above, but for 32-bit builds as well.
+apt-get install -y --no-install-recommends libc6-dev-i386 g++-multilib
+# For running the extended gccgo testsuite
+apt-get install -y --no-install-recommends dejagnu
+# For interacting with the gccgo source and git mirror:
+apt-get install -y --no-install-recommends mercurial git-core
+
+apt-get clean
+rm -rf /var/lib/apt/lists
diff --git a/llgo/third_party/go.tools/dashboard/env/linux-x86-gccgo/scripts/install-gccgo-builder.sh b/llgo/third_party/go.tools/dashboard/env/linux-x86-gccgo/scripts/install-gccgo-builder.sh
new file mode 100755
index 0000000000000000000000000000000000000000..fd3785dc9342d00224b0d9adfb164e277038e541
--- /dev/null
+++ b/llgo/third_party/go.tools/dashboard/env/linux-x86-gccgo/scripts/install-gccgo-builder.sh
@@ -0,0 +1,7 @@
+set -ex
+
+# Installs a version of the go.tools dashboard builder that runs the gccgo build
+# command assuming there are 16 cores available to speed up build times.
+# TODO(cmang): There should be an option in the builder to specify this.
+
+curl -o /usr/local/bin/builder http://storage.googleapis.com/go-builder-data/gccgo_builder && chmod +x /usr/local/bin/builder
diff --git a/llgo/third_party/go.tools/dashboard/env/linux-x86-gccgo/scripts/install-gold.sh b/llgo/third_party/go.tools/dashboard/env/linux-x86-gccgo/scripts/install-gold.sh
new file mode 100755
index 0000000000000000000000000000000000000000..77f96acab8e84531fc5d2df4ce63236674992fcb
--- /dev/null
+++ b/llgo/third_party/go.tools/dashboard/env/linux-x86-gccgo/scripts/install-gold.sh
@@ -0,0 +1,9 @@
+set -ex
+
+# gccgo uses the Gold linker from binutils.
+export BINUTILS_VERSION=binutils-2.24
+mkdir -p binutils-objdir
+curl -s http://ftp.gnu.org/gnu/binutils/$BINUTILS_VERSION.tar.gz | tar x --no-same-owner -zv
+(cd binutils-objdir && ../$BINUTILS_VERSION/configure --enable-gold --enable-plugins --prefix=/opt/gold && make -sj && make install -sj)
+
+rm -rf binutils*
\ No newline at end of file
diff --git a/llgo/third_party/go.tools/dashboard/env/linux-x86-nacl/Dockerfile b/llgo/third_party/go.tools/dashboard/env/linux-x86-nacl/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..68a8df52f9ac9c3b0e8a7d9a8e11c49ec3f9379e
--- /dev/null
+++ b/llgo/third_party/go.tools/dashboard/env/linux-x86-nacl/Dockerfile
@@ -0,0 +1,27 @@
+# Copyright 2014 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+# gobuilders/linux-x86-nacl for 32- and 64-bit nacl.
+#
+# We need a more modern libc than Debian stable (as used in base), so we're
+# using Ubuntu LTS here.
+#
+# TODO(bradfitz): make both be Ubuntu? But we also want Debian, Fedora,
+# etc. coverage, so deal with unifying these later, once there's a plan
+# or a generator for them and the other builders are turned down.
+
+FROM ubuntu:trusty
+MAINTAINER golang-dev
+
+ENV DEBIAN_FRONTEND noninteractive
+
+ADD /scripts/install-apt-deps.sh /scripts/
+RUN /scripts/install-apt-deps.sh
+
+ADD /scripts/build-go-builder.sh /scripts/
+RUN GO_REV=8c27884843c3 BUILDER_REV=ced78bfefcb3 /scripts/build-go-builder.sh && test -f /usr/local/bin/builder
+
+ADD build-command.pl /usr/local/bin/
+
+ENV PATH /usr/local/bin:$GOROOT/bin:$PATH
diff --git a/llgo/third_party/go.tools/dashboard/env/linux-x86-nacl/Makefile b/llgo/third_party/go.tools/dashboard/env/linux-x86-nacl/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..3c2b7e3a1d2115ebbc3ede309a7358656403df5d
--- /dev/null
+++ b/llgo/third_party/go.tools/dashboard/env/linux-x86-nacl/Makefile
@@ -0,0 +1,12 @@
+# Copyright 2014 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+docker: Dockerfile
+ docker build -t gobuilders/linux-x86-nacl .
+
+upload: docker
+ docker save gobuilders/linux-x86-nacl | gzip | (cd ../../coordinator/buildongce && go run create.go --write_object=go-builder-data/docker-linux.nacl.tar.gz)
+
+check: docker
+ docker run gobuilders/linux-x86-nacl /usr/local/bin/builder -rev=8c27884843c3 -buildroot=/ -v -cmd=/usr/local/bin/build-command.pl -report=false nacl-amd64p32-temp
diff --git a/llgo/third_party/go.tools/dashboard/env/linux-x86-nacl/README b/llgo/third_party/go.tools/dashboard/env/linux-x86-nacl/README
new file mode 100644
index 0000000000000000000000000000000000000000..5862ee19b1bfef4d67aa976869138c6b34558d9b
--- /dev/null
+++ b/llgo/third_party/go.tools/dashboard/env/linux-x86-nacl/README
@@ -0,0 +1,6 @@
+$ export BUILD=nacl-amd64p32-temp
+$ export BUILDREV=59b1bb4bf045
+$ docker run \
+ -v $HOME/keys/$BUILD.buildkey:/.gobuildkey \
+ gobuilders/linux-x86-nacl \
+ /usr/local/bin/builder -rev=$BUILDREV -buildroot=/ -v -cmd=/usr/local/bin/build-command.pl $BUILD
diff --git a/llgo/third_party/go.tools/dashboard/env/linux-x86-nacl/build-command.pl b/llgo/third_party/go.tools/dashboard/env/linux-x86-nacl/build-command.pl
new file mode 100755
index 0000000000000000000000000000000000000000..0eb9edbb7703a70d2d6d4a446d7ff6c8b49cdd29
--- /dev/null
+++ b/llgo/third_party/go.tools/dashboard/env/linux-x86-nacl/build-command.pl
@@ -0,0 +1,13 @@
+#!/usr/bin/perl
+
+use strict;
+
+if ($ENV{GOOS} eq "nacl") {
+ delete $ENV{GOROOT_FINAL};
+ exec("./nacltest.bash", @ARGV);
+ die "Failed to run nacltest.bash: $!\n";
+}
+
+exec("./all.bash", @ARGV);
+die "Failed to run all.bash: $!\n";
+
diff --git a/llgo/third_party/go.tools/dashboard/env/linux-x86-nacl/scripts/build-go-builder.sh b/llgo/third_party/go.tools/dashboard/env/linux-x86-nacl/scripts/build-go-builder.sh
new file mode 100755
index 0000000000000000000000000000000000000000..10bf847b3f1eb4864749b964b6d2641e7b50b29a
--- /dev/null
+++ b/llgo/third_party/go.tools/dashboard/env/linux-x86-nacl/scripts/build-go-builder.sh
@@ -0,0 +1,26 @@
+set -ex
+
+export GOPATH=/gopath
+export GOROOT=/goroot
+PREFIX=/usr/local
+: ${GO_REV:?"need to be set to the golang repo revision used to build the builder."}
+: ${BUILDER_REV:?"need to be set to the go.tools repo revision for the builder."}
+
+mkdir -p $GOROOT
+curl -s https://storage.googleapis.com/gobuilder/go-snap.tar.gz | tar x --no-same-owner -zv -C $GOROOT
+(cd $GOROOT/src && hg pull -r $GO_REV -u && find && ./make.bash)
+
+GO_TOOLS=$GOPATH/src/golang.org/x/tools
+mkdir -p $GO_TOOLS
+curl -s https://storage.googleapis.com/gobuilder/go.tools-snap.tar.gz | tar x --no-same-owner -zv -C $GO_TOOLS
+
+mkdir -p $PREFIX/bin
+(cd $GO_TOOLS && hg pull -r $BUILDER_REV -u && GOBIN=$PREFIX/bin /goroot/bin/go install golang.org/x/tools/dashboard/builder)
+
+rm -fR $GOROOT/bin $GOROOT/pkg $GOPATH
+
+(cd /usr/local/bin && curl -s -O https://storage.googleapis.com/gobuilder/sel_ldr_x86_32 && chmod +x sel_ldr_x86_32)
+(cd /usr/local/bin && curl -s -O https://storage.googleapis.com/gobuilder/sel_ldr_x86_64 && chmod +x sel_ldr_x86_64)
+
+ln -s $GOROOT/misc/nacl/go_nacl_386_exec /usr/local/bin/
+ln -s $GOROOT/misc/nacl/go_nacl_amd64p32_exec /usr/local/bin/
diff --git a/llgo/third_party/go.tools/dashboard/env/linux-x86-nacl/scripts/install-apt-deps.sh b/llgo/third_party/go.tools/dashboard/env/linux-x86-nacl/scripts/install-apt-deps.sh
new file mode 100755
index 0000000000000000000000000000000000000000..f5186467618d536f6b58b19303c529c5c19d12c8
--- /dev/null
+++ b/llgo/third_party/go.tools/dashboard/env/linux-x86-nacl/scripts/install-apt-deps.sh
@@ -0,0 +1,14 @@
+set -ex
+
+apt-get update
+# For running curl to get the hg starter tarballs (faster than hg clone).
+apt-get install -y --no-install-recommends curl ca-certificates
+# For building Go's bootstrap 'dist' prog
+apt-get install -y --no-install-recommends gcc libc6-dev
+# For interacting with the Go source & subrepos:
+apt-get install -y --no-install-recommends mercurial git-core
+# For 32-bit nacl:
+apt-get install -y --no-install-recommends libc6-i386 libc6-dev-i386 lib32stdc++6 gcc-multilib
+
+apt-get clean
+rm -fr /var/lib/apt/lists
diff --git a/llgo/third_party/go.tools/dashboard/env/linux-x86-sid/Dockerfile b/llgo/third_party/go.tools/dashboard/env/linux-x86-sid/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..057e93aed3e888fc2d727ee938d84af703a677dd
--- /dev/null
+++ b/llgo/third_party/go.tools/dashboard/env/linux-x86-sid/Dockerfile
@@ -0,0 +1,14 @@
+# Copyright 2014 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+FROM debian:sid
+MAINTAINER golang-dev
+
+ENV DEBIAN_FRONTEND noninteractive
+
+ADD /scripts/install-apt-deps.sh /scripts/
+RUN /scripts/install-apt-deps.sh
+
+ADD /scripts/build-go-builder.sh /scripts/
+RUN GO_REV=8c27884843c3 BUILDER_REV=ced78bfefcb3 /scripts/build-go-builder.sh && test -f /usr/local/bin/builder
diff --git a/llgo/third_party/go.tools/dashboard/env/linux-x86-sid/Makefile b/llgo/third_party/go.tools/dashboard/env/linux-x86-sid/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..e636e49397587cd9f64940c06208d95c82f4ced2
--- /dev/null
+++ b/llgo/third_party/go.tools/dashboard/env/linux-x86-sid/Makefile
@@ -0,0 +1,12 @@
+# Copyright 2014 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+docker: Dockerfile
+ docker build -t gobuilders/linux-x86-sid .
+
+docker-linux.sid.tar.gz: docker
+ docker save gobuilders/linux-x86-sid | gzip | (cd ../../coordinator/buildongce && go run create.go --write_object=go-builder-data/docker-linux.sid.tar.gz)
+
+check: docker
+ docker run gobuilders/linux-x86-sid /usr/local/bin/builder -rev=8c27884843c3 -buildroot=/ -v -report=false linux-amd64-sid
diff --git a/llgo/third_party/go.tools/dashboard/env/linux-x86-sid/scripts/build-go-builder.sh b/llgo/third_party/go.tools/dashboard/env/linux-x86-sid/scripts/build-go-builder.sh
new file mode 100755
index 0000000000000000000000000000000000000000..097ac171c184adcbeabf50dc1c01c73dee218cc7
--- /dev/null
+++ b/llgo/third_party/go.tools/dashboard/env/linux-x86-sid/scripts/build-go-builder.sh
@@ -0,0 +1,20 @@
+set -ex
+
+export GOPATH=/gopath
+export GOROOT=/goroot
+PREFIX=/usr/local
+: ${GO_REV:?"need to be set to the golang repo revision used to build the builder."}
+: ${BUILDER_REV:?"need to be set to the go.tools repo revision for the builder."}
+
+mkdir -p $GOROOT
+curl -s https://storage.googleapis.com/gobuilder/go-snap.tar.gz | tar x --no-same-owner -zv -C $GOROOT
+(cd $GOROOT/src && hg pull -r $GO_REV -u && find && ./make.bash)
+
+GO_TOOLS=$GOPATH/src/golang.org/x/tools
+mkdir -p $GO_TOOLS
+curl -s https://storage.googleapis.com/gobuilder/go.tools-snap.tar.gz | tar x --no-same-owner -zv -C $GO_TOOLS
+
+mkdir -p $PREFIX/bin
+(cd $GO_TOOLS && hg pull -r $BUILDER_REV -u && GOBIN=$PREFIX/bin /goroot/bin/go install golang.org/x/tools/dashboard/builder)
+
+rm -fR $GOROOT/bin $GOROOT/pkg $GOPATH
diff --git a/llgo/third_party/go.tools/dashboard/env/linux-x86-sid/scripts/install-apt-deps.sh b/llgo/third_party/go.tools/dashboard/env/linux-x86-sid/scripts/install-apt-deps.sh
new file mode 100755
index 0000000000000000000000000000000000000000..839f4ad2fa02de0778ec13e500ae0b7a165f4d7b
--- /dev/null
+++ b/llgo/third_party/go.tools/dashboard/env/linux-x86-sid/scripts/install-apt-deps.sh
@@ -0,0 +1,17 @@
+set -ex
+
+apt-get update
+# For running curl to get the hg starter tarballs (faster than hg clone).
+apt-get install -y --no-install-recommends curl ca-certificates
+# Optionally used by some net/http tests:
+apt-get install -y --no-install-recommends strace
+# For building Go's bootstrap 'dist' prog
+apt-get install -y --no-install-recommends gcc libc6-dev
+# For 32-bit builds:
+# TODO(bradfitz): move these into a 386 image that derives from this one.
+apt-get install -y --no-install-recommends libc6-dev-i386 gcc-multilib
+# For interacting with the Go source & subrepos:
+apt-get install -y --no-install-recommends mercurial git-core
+
+apt-get clean
+rm -fr /var/lib/apt/lists
diff --git a/llgo/third_party/go.tools/dashboard/updater/updater.go b/llgo/third_party/go.tools/dashboard/updater/updater.go
new file mode 100644
index 0000000000000000000000000000000000000000..0601611de02537e10a2028a9e19f01d771953996
--- /dev/null
+++ b/llgo/third_party/go.tools/dashboard/updater/updater.go
@@ -0,0 +1,128 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "bytes"
+ "encoding/json"
+ "encoding/xml"
+ "flag"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "os"
+ "os/exec"
+ "strings"
+)
+
+var (
+ builder = flag.String("builder", "", "builder name")
+ key = flag.String("key", "", "builder key")
+ gopath = flag.String("gopath", "", "path to go repo")
+ dashboard = flag.String("dashboard", "build.golang.org", "Go Dashboard Host")
+ batch = flag.Int("batch", 100, "upload batch size")
+)
+
+// Do not benchmark commits older than this one.
+// There is little sense in benchmarking anything before Go 1,
+// and the benchmarks won't build anyway.
+const Go1Commit = "0051c7442fed" // test/bench/shootout: update timing.log to Go 1.
+
+// HgLog represents a single Mercurial revision.
+type HgLog struct {
+ Hash string
+ Branch string
+ Files string
+}
+
+func main() {
+ flag.Parse()
+ logs := hgLog()
+ var hashes []string
+ ngo1 := 0
+ for i := range logs {
+ if strings.HasPrefix(logs[i].Hash, Go1Commit) {
+ break
+ }
+ if needsBenchmarking(&logs[i]) {
+ hashes = append(hashes, logs[i].Hash)
+ }
+ ngo1++
+ }
+ fmt.Printf("found %v commits, %v after Go1, %v need benchmarking\n", len(logs), ngo1, len(hashes))
+ for i := 0; i < len(hashes); i += *batch {
+ j := i + *batch
+ if j > len(hashes) {
+ j = len(hashes)
+ }
+ fmt.Printf("sending %v-%v... ", i, j)
+ res := postCommits(hashes[i:j])
+ fmt.Printf("%s\n", res)
+ }
+}
+
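+// hgLog runs "hg log" in the Go repo given by --gopath and parses the
+// XML-templated output into a slice of HgLog entries, newest first.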
+func hgLog() []HgLog {
+ var out bytes.Buffer
+ cmd := exec.Command("hg", "log", "--encoding=utf-8", "--template", xmlLogTemplate)
+ cmd.Dir = *gopath
+ cmd.Stdout = &out
+ cmd.Stderr = os.Stderr
+ err := cmd.Run()
+ if err != nil {
+ fmt.Printf("failed to execute 'hg log': %v\n", err)
+ os.Exit(1)
+ }
+ var top struct{ Log []HgLog }
+ err = xml.Unmarshal([]byte("<Top>"+out.String()+"</Top>"), &top)
+ if err != nil {
+ fmt.Printf("failed to parse log: %v\n", err)
+ os.Exit(1)
+ }
+ return top.Log
+}
+
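+// needsBenchmarking reports whether the commit is worth benchmarking:
+// it must be on the default branch and touch non-test sources under
+// include/ or src/.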
+func needsBenchmarking(log *HgLog) bool {
+ if log.Branch != "" {
+ return false
+ }
+ for _, f := range strings.Split(log.Files, " ") {
+ if (strings.HasPrefix(f, "include") || strings.HasPrefix(f, "src")) &&
+ !strings.HasSuffix(f, "_test.go") && !strings.Contains(f, "testdata") {
+ return true
+ }
+ }
+ return false
+}
+
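+// postCommits sends the given commit hashes to the dashboard's
+// /updatebenchmark endpoint and returns the response body or an error message.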
+func postCommits(hashes []string) string {
+ args := url.Values{"builder": {*builder}, "key": {*key}}
+ cmd := fmt.Sprintf("http://%v/updatebenchmark?%v", *dashboard, args.Encode())
+ b, err := json.Marshal(hashes)
+ if err != nil {
+ return fmt.Sprintf("failed to encode request: %v\n", err)
+ }
+ r, err := http.Post(cmd, "text/json", bytes.NewReader(b))
+ if err != nil {
+ return fmt.Sprintf("failed to send http request: %v\n", err)
+ }
+ defer r.Body.Close()
+ if r.StatusCode != http.StatusOK {
+ return fmt.Sprintf("http request failed: %v\n", r.Status)
+ }
+ resp, err := ioutil.ReadAll(r.Body)
+ if err != nil {
+ return fmt.Sprintf("failed to read http response: %v\n", err)
+ }
+ return string(resp)
+}
+
+const xmlLogTemplate = `
+ <Log>
+ <Hash>{node|escape}</Hash>
+ <Branch>{branches}</Branch>
+ <Files>{files}</Files>
+ </Log>
+`
diff --git a/llgo/third_party/go.tools/dashboard/watcher/watcher.go b/llgo/third_party/go.tools/dashboard/watcher/watcher.go
new file mode 100644
index 0000000000000000000000000000000000000000..7c145f58a8c9439e93cc1e0ec75edbfb8387cabd
--- /dev/null
+++ b/llgo/third_party/go.tools/dashboard/watcher/watcher.go
@@ -0,0 +1,589 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Command watcher watches the specified repository for new commits
+// and reports them to the build dashboard.
+package main
+
+import (
+ "bytes"
+ "encoding/json"
+ "encoding/xml"
+ "errors"
+ "flag"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "net/http"
+ "net/url"
+ "os"
+ "os/exec"
+ "path"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "time"
+)
+
+var (
+ repoURL = flag.String("repo", "https://code.google.com/p/go", "Repository URL")
+ dashboard = flag.String("dash", "https://build.golang.org/", "Dashboard URL (must end in /)")
+ keyFile = flag.String("key", defaultKeyFile, "Build dashboard key file")
+ pollInterval = flag.Duration("poll", 10*time.Second, "Remote repo poll interval")
+)
+
+var (
+ defaultKeyFile = filepath.Join(homeDir(), ".gobuildkey")
+ dashboardKey = ""
+)
+
+// The first main repo commit on the dashboard; ignore commits before this.
+// This is for the main Go repo only.
+const dashboardStart = "2f970046e1ba96f32de62f5639b7141cda2e977c"
+
+func main() {
+ flag.Parse()
+
+ err := run()
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(1)
+}
+
+// run is a little wrapper so we can use defer and return to signal
+// errors. It should only return a non-nil error.
+func run() error {
+ if !strings.HasSuffix(*dashboard, "/") {
+ return errors.New("dashboard URL (-dashboard) must end in /")
+ }
+ if err := checkHgVersion(); err != nil {
+ return err
+ }
+
+ if k, err := readKey(); err != nil {
+ return err
+ } else {
+ dashboardKey = k
+ }
+
+ dir, err := ioutil.TempDir("", "watcher")
+ if err != nil {
+ return err
+ }
+ defer os.RemoveAll(dir)
+
+ errc := make(chan error)
+
+ go func() {
+ r, err := NewRepo(dir, *repoURL, "")
+ if err != nil {
+ errc <- err
+ return
+ }
+ errc <- r.Watch()
+ }()
+
+ subrepos, err := subrepoList()
+ if err != nil {
+ return err
+ }
+ for _, path := range subrepos {
+ go func(path string) {
+ url := "https://" + path
+ r, err := NewRepo(dir, url, path)
+ if err != nil {
+ errc <- err
+ return
+ }
+ errc <- r.Watch()
+ }(path)
+ }
+
+ // Must be non-nil.
+ return <-errc
+}
+
+// Repo represents a repository to be watched.
+type Repo struct {
+ root string // on-disk location of the hg repo
+ path string // base import path for repo (blank for main repo)
+ commits map[string]*Commit // keyed by full commit hash (40 lowercase hex digits)
+ branches map[string]*Branch // keyed by branch name, eg "release-branch.go1.3" (or empty for default)
+}
+
+// NewRepo checks out a new instance of the Mercurial repository
+// specified by url to a new directory inside dir.
+// The path argument is the base import path of the repository,
+// and should be empty for the main Go repo.
+func NewRepo(dir, url, path string) (*Repo, error) {
+ r := &Repo{
+ path: path,
+ root: filepath.Join(dir, filepath.Base(path)),
+ }
+
+ r.logf("cloning %v", url)
+ cmd := exec.Command("hg", "clone", url, r.root)
+ if out, err := cmd.CombinedOutput(); err != nil {
+ return nil, fmt.Errorf("%v\n\n%s", err, out)
+ }
+
+ r.logf("loading commit log")
+ if err := r.loadCommits(); err != nil {
+ return nil, err
+ }
+ if err := r.findBranches(); err != nil {
+ return nil, err
+ }
+
+ r.logf("found %v branches among %v commits\n", len(r.branches), len(r.commits))
+ return r, nil
+}
+
+// Watch continuously runs "hg pull" in the repo, checks for
+// new commits, and posts any new commits to the dashboard.
+// It only returns a non-nil error.
+func (r *Repo) Watch() error {
+ for {
+ if err := hgPull(r.root); err != nil {
+ return err
+ }
+ if err := r.update(); err != nil {
+ return err
+ }
+ for _, b := range r.branches {
+ if err := r.postNewCommits(b); err != nil {
+ return err
+ }
+ }
+ time.Sleep(*pollInterval)
+ }
+}
+
+func (r *Repo) logf(format string, args ...interface{}) {
+ p := "go"
+ if r.path != "" {
+ p = path.Base(r.path)
+ }
+ log.Printf(p+": "+format, args...)
+}
+
+// postNewCommits looks for unseen commits on the specified branch and
+// posts them to the dashboard.
+func (r *Repo) postNewCommits(b *Branch) error {
+ if b.Head == b.LastSeen {
+ return nil
+ }
+ c := b.LastSeen
+ if c == nil {
+ // Haven't seen any: find the commit that this branch forked from.
+ for c = b.Head; c.Branch == b.Name; c = c.parent {
+ }
+ }
+ // Add unseen commits on this branch, working forward from last seen.
+ for c.children != nil {
+ // Find the next commit on this branch.
+ var next *Commit
+ for _, c2 := range c.children {
+ if c2.Branch != b.Name {
+ continue
+ }
+ if next != nil {
+ // Shouldn't happen, but be paranoid.
+ return fmt.Errorf("found multiple children of %v on branch %q: %v and %v", c, b.Name, next, c2)
+ }
+ next = c2
+ }
+ if next == nil {
+ // No more children on this branch, bail.
+ break
+ }
+ // Found it.
+ c = next
+
+ if err := r.postCommit(c); err != nil {
+ return err
+ }
+ b.LastSeen = c
+ }
+ return nil
+}
+
+// postCommit sends a commit to the build dashboard.
+func (r *Repo) postCommit(c *Commit) error {
+ r.logf("sending commit to dashboard: %v", c)
+
+ t, err := time.Parse(time.RFC3339, c.Date)
+ if err != nil {
+ return err
+ }
+ dc := struct {
+ PackagePath string // (empty for main repo commits)
+ Hash string
+ ParentHash string
+
+ User string
+ Desc string
+ Time time.Time
+
+ NeedsBenchmarking bool
+ }{
+ PackagePath: r.path,
+ Hash: c.Hash,
+ ParentHash: c.Parent,
+
+ User: c.Author,
+ Desc: c.Desc,
+ Time: t,
+
+ NeedsBenchmarking: c.NeedsBenchmarking(),
+ }
+ b, err := json.Marshal(dc)
+ if err != nil {
+ return err
+ }
+
+ u := *dashboard + "commit?version=2&key=" + dashboardKey
+ resp, err := http.Post(u, "text/json", bytes.NewReader(b))
+ if err != nil {
+ return err
+ }
+ if resp.StatusCode != 200 {
+ return fmt.Errorf("status: %v", resp.Status)
+ }
+ return nil
+}
+
+// loadCommits runs "hg log" and populates the Repo's commit map.
+func (r *Repo) loadCommits() error {
+ log, err := hgLog(r.root)
+ if err != nil {
+ return err
+ }
+ r.commits = make(map[string]*Commit)
+ for _, c := range log {
+ r.commits[c.Hash] = c
+ }
+ for _, c := range r.commits {
+ if p, ok := r.commits[c.Parent]; ok {
+ c.parent = p
+ p.children = append(p.children, c)
+ }
+ }
+ return nil
+}
+
+// findBranches finds branch heads in the Repo's commit map
+// and populates its branch map.
+func (r *Repo) findBranches() error {
+ r.branches = make(map[string]*Branch)
+ for _, c := range r.commits {
+ if c.children == nil {
+ if !validHead(c) {
+ continue
+ }
+ seen, err := r.lastSeen(c.Hash)
+ if err != nil {
+ return err
+ }
+ b := &Branch{Name: c.Branch, Head: c, LastSeen: seen}
+ r.branches[c.Branch] = b
+ r.logf("found branch: %v", b)
+ }
+ }
+ return nil
+}
+
+// validHead reports whether the specified commit should be considered a branch
+// head. It considers pre-go1 branches and certain specific commits as invalid.
+func validHead(c *Commit) bool {
+ // Pre-Go 1 release branches are irrelevant.
+ if strings.HasPrefix(c.Branch, "release-branch.r") {
+ return false
+ }
+ // Not sure why these revisions have no child commits,
+ // but they're old so let's just ignore them.
+ if c.Hash == "b59f4ff1b51094314f735a4d57a2b8f06cfadf15" ||
+ c.Hash == "fc75f13840b896e82b9fa6165cf705fbacaf019c" {
+ return false
+ }
+ // All other branches are valid.
+ return true
+}
+
+// update looks for new commits and branches in the repository
+// and updates the commits and branches maps.
+func (r *Repo) update() error {
+ // TODO(adg): detect new branches with "hg branches".
+
+ // Check each branch for new commits.
+ for _, b := range r.branches {
+
+ // Find all commits on this branch from known head.
+ // The logic of this function assumes that "hg log $HASH:"
+ // returns hashes in the order they were committed (parent first).
+ bname := b.Name
+ if bname == "" {
+ bname = "default"
+ }
+ log, err := hgLog(r.root, "-r", b.Head.Hash+":", "-b", bname)
+ if err != nil {
+ return err
+ }
+
+ // Add unknown commits to r.commits, and update branch head.
+ for _, c := range log {
+ // Ignore if we already know this commit.
+ if _, ok := r.commits[c.Hash]; ok {
+ continue
+ }
+ r.logf("found new commit %v", c)
+
+ // Sanity check that we're looking at a commit on this branch.
+ if c.Branch != b.Name {
+ return fmt.Errorf("hg log gave us a commit from wrong branch: want %q, got %q", b.Name, c.Branch)
+ }
+
+ // Find parent commit.
+ p, ok := r.commits[c.Parent]
+ if !ok {
+ return fmt.Errorf("can't find parent hash %q for %v", c.Parent, c)
+ }
+
+ // Link parent and child Commits.
+ c.parent = p
+ p.children = append(p.children, c)
+
+ // Update branch head.
+ b.Head = c
+
+ // Add new commit to map.
+ r.commits[c.Hash] = c
+ }
+ }
+
+ return nil
+}
+
+// lastSeen finds the most recent commit the dashboard has seen,
+// starting at the specified head. If the dashboard hasn't seen
+// any of the commits from head to the beginning, it returns nil.
+func (r *Repo) lastSeen(head string) (*Commit, error) {
+ h, ok := r.commits[head]
+ if !ok {
+ return nil, fmt.Errorf("lastSeen: can't find %q in commits", head)
+ }
+
+ var s []*Commit
+ for c := h; c != nil; c = c.parent {
+ s = append(s, c)
+ if r.path == "" && c.Hash == dashboardStart {
+ break
+ }
+ }
+
+ for _, c := range s {
+ v := url.Values{"hash": {c.Hash}, "packagePath": {r.path}}
+ u := *dashboard + "commit?" + v.Encode()
+ r, err := http.Get(u)
+ if err != nil {
+ return nil, err
+ }
+ var resp struct {
+ Error string
+ }
+ err = json.NewDecoder(r.Body).Decode(&resp)
+ r.Body.Close()
+ if err != nil {
+ return nil, err
+ }
+ switch resp.Error {
+ case "":
+ // Found one.
+ return c, nil
+ case "Commit not found":
+ // Commit not found, keep looking for earlier commits.
+ continue
+ default:
+ return nil, fmt.Errorf("dashboard: %v", resp.Error)
+ }
+ }
+
+ // Dashboard saw no commits.
+ return nil, nil
+}
+
+// hgLog runs "hg log" with the supplied arguments
+// and parses the output into Commit values.
+func hgLog(dir string, args ...string) ([]*Commit, error) {
+ args = append([]string{"log", "--template", xmlLogTemplate}, args...)
+ cmd := exec.Command("hg", args...)
+ cmd.Dir = dir
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ return nil, err
+ }
+
+ // We have a commit with description that contains 0x1b byte.
+ // Mercurial does not escape it, but xml.Unmarshal does not accept it.
+ out = bytes.Replace(out, []byte{0x1b}, []byte{'?'}, -1)
+
+ xr := io.MultiReader(
+ strings.NewReader(""),
+ bytes.NewReader(out),
+ strings.NewReader(" "),
+ )
+ var logStruct struct {
+ Log []*Commit
+ }
+ err = xml.NewDecoder(xr).Decode(&logStruct)
+ if err != nil {
+ return nil, err
+ }
+ return logStruct.Log, nil
+}
+
+// hgPull runs "hg pull" in the specified directory.
+// It tries three times, just in case it failed because of a transient error.
+func hgPull(dir string) error {
+ var err error
+ for tries := 0; tries < 3; tries++ {
+ time.Sleep(time.Duration(tries) * 5 * time.Second) // Linear back-off.
+ cmd := exec.Command("hg", "pull")
+ cmd.Dir = dir
+ if out, e := cmd.CombinedOutput(); e != nil {
+ e = fmt.Errorf("%v\n\n%s", e, out)
+ log.Printf("hg pull error %v: %v", dir, e)
+ if err == nil {
+ err = e
+ }
+ continue
+ }
+ return nil
+ }
+ return err
+}
+
+// Branch represents a Mercurial branch.
+type Branch struct {
+ Name string
+ Head *Commit
+ LastSeen *Commit // the last commit posted to the dashboard
+}
+
+func (b *Branch) String() string {
+ return fmt.Sprintf("%q(Head: %v LastSeen: %v)", b.Name, b.Head, b.LastSeen)
+}
+
+// Commit represents a single Mercurial revision.
+type Commit struct {
+ Hash string
+ Author string
+ Date string
+ Desc string // Plain text, first linefeed-terminated line is a short description.
+ Parent string
+ Branch string
+ Files string
+
+ // For walking the graph.
+ parent *Commit
+ children []*Commit
+}
+
+func (c *Commit) String() string {
+ return fmt.Sprintf("%v(%q)", c.Hash, strings.SplitN(c.Desc, "\n", 2)[0])
+}
+
+// NeedsBenchmarking reports whether the Commit needs benchmarking.
+func (c *Commit) NeedsBenchmarking() bool {
+ // Do not benchmark branch commits, they are usually not interesting
+ // and fall out of the trunk succession.
+ if c.Branch != "" {
+ return false
+ }
+ // Do not benchmark commits that do not touch source files (e.g. CONTRIBUTORS).
+ for _, f := range strings.Split(c.Files, " ") {
+ if (strings.HasPrefix(f, "include") || strings.HasPrefix(f, "src")) &&
+ !strings.HasSuffix(f, "_test.go") && !strings.Contains(f, "testdata") {
+ return true
+ }
+ }
+ return false
+}
+
+// xmlLogTemplate is a template to pass to Mercurial to make
+// hg log print the log in valid XML for parsing with xml.Unmarshal.
+// Can not escape branches and files, because it crashes python with:
+// AttributeError: 'NoneType' object has no attribute 'replace'
+const xmlLogTemplate = `
+ <Log>
+ <Hash>{node|escape}</Hash>
+ <Parent>{p1node}</Parent>
+ <Author>{author|escape}</Author>
+ <Date>{date|rfc3339date}</Date>
+ <Desc>{desc|escape}</Desc>
+ <Branch>{branches}</Branch>
+ <Files>{files}</Files>
+ </Log>
+`
+
+func homeDir() string {
+ switch runtime.GOOS {
+ case "plan9":
+ return os.Getenv("home")
+ case "windows":
+ return os.Getenv("HOMEDRIVE") + os.Getenv("HOMEPATH")
+ }
+ return os.Getenv("HOME")
+}
+
+func readKey() (string, error) {
+ c, err := ioutil.ReadFile(*keyFile)
+ if err != nil {
+ return "", err
+ }
+ return string(bytes.TrimSpace(bytes.SplitN(c, []byte("\n"), 2)[0])), nil
+}
+
+// subrepoList fetches a list of sub-repositories from the dashboard
+// and returns them as a slice of base import paths.
+// Eg, []string{"golang.org/x/tools", "golang.org/x/net"}.
+func subrepoList() ([]string, error) {
+ r, err := http.Get(*dashboard + "packages?kind=subrepo")
+ if err != nil {
+ return nil, err
+ }
+ var resp struct {
+ Response []struct {
+ Path string
+ }
+ Error string
+ }
+ err = json.NewDecoder(r.Body).Decode(&resp)
+ r.Body.Close()
+ if err != nil {
+ return nil, err
+ }
+ if resp.Error != "" {
+ return nil, errors.New(resp.Error)
+ }
+ var pkgs []string
+ for _, r := range resp.Response {
+ pkgs = append(pkgs, r.Path)
+ }
+ return pkgs, nil
+}
+
+// checkHgVersion checks whether the installed version of hg supports the
+// template features we need. (May not be precise.)
+func checkHgVersion() error {
+ out, err := exec.Command("hg", "help", "templates").CombinedOutput()
+ if err != nil {
+ return fmt.Errorf("error running hg help templates: %v\n\n%s", err, out)
+ }
+ if !bytes.Contains(out, []byte("p1node")) {
+ return errors.New("installed hg doesn't support 'p1node' template keyword; please upgrade")
+ }
+ return nil
+}
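The following is an illustrative, self-contained sketch of how the xmlLogTemplate output is consumed; it is not part of the patch, and the revision data is invented. Assuming the template expands to one <Log> element per revision, hgLog wraps the raw hg output in a synthetic <Top> root and encoding/xml maps each child element onto the matching Commit field by name:

package main

import (
    "encoding/xml"
    "fmt"
    "io"
    "strings"
)

// commit mirrors the exported fields of the watcher's Commit type.
type commit struct {
    Hash, Author, Date, Desc, Parent, Branch, Files string
}

func main() {
    // Hypothetical output of "hg log --template xmlLogTemplate" for one revision.
    sample := `<Log>
<Hash>2f970046e1ba96f32de62f5639b7141cda2e977c</Hash>
<Parent></Parent>
<Author>gopher</Author>
<Date>2014-01-01T00:00:00Z</Date>
<Desc>example: a hypothetical commit</Desc>
<Branch></Branch>
<Files>src/foo.go</Files>
</Log>`

    // Wrap the stream in a synthetic root element, exactly as hgLog does.
    xr := io.MultiReader(
        strings.NewReader("<Top>"),
        strings.NewReader(sample),
        strings.NewReader("</Top>"),
    )
    var top struct{ Log []commit }
    if err := xml.NewDecoder(xr).Decode(&top); err != nil {
        panic(err)
    }
    fmt.Println(len(top.Log), top.Log[0].Hash, top.Log[0].Files)
}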
diff --git a/llgo/third_party/go.tools/go/buildutil/allpackages.go b/llgo/third_party/go.tools/go/buildutil/allpackages.go
new file mode 100644
index 0000000000000000000000000000000000000000..1da5560e17a4f578a597cb407fee7802a91a9b85
--- /dev/null
+++ b/llgo/third_party/go.tools/go/buildutil/allpackages.go
@@ -0,0 +1,111 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package buildutil provides utilities related to the go/build
+// package in the standard library.
+//
+// All I/O is done via the build.Context file system interface, which must
+// be concurrency-safe.
+package buildutil
+
+import (
+ "go/build"
+ "os"
+ "path/filepath"
+ "sort"
+ "strings"
+ "sync"
+)
+
+// AllPackages returns the import path of each Go package in any source
+// directory of the specified build context (e.g. $GOROOT or an element
+// of $GOPATH). Errors are ignored. The results are sorted.
+//
+// The result may include import paths for directories that contain no
+// *.go files, such as "archive" (in $GOROOT/src).
+//
+// All I/O is done via the build.Context file system interface,
+// which must be concurrency-safe.
+//
+func AllPackages(ctxt *build.Context) []string {
+ var list []string
+ var mu sync.Mutex
+ ForEachPackage(ctxt, func(pkg string, _ error) {
+ mu.Lock()
+ list = append(list, pkg)
+ mu.Unlock()
+ })
+ sort.Strings(list)
+ return list
+}
+
+// ForEachPackage calls the found function with the import path of
+// each Go package it finds in any source directory of the specified
+// build context (e.g. $GOROOT or an element of $GOPATH).
+//
+// If the package directory exists but could not be read, the second
+// argument to the found function provides the error.
+//
+// The found function and the build.Context file system interface
+// accessors must be concurrency safe.
+//
+func ForEachPackage(ctxt *build.Context, found func(importPath string, err error)) {
+ // We use a counting semaphore to limit
+ // the number of parallel calls to ReadDir.
+ sema := make(chan bool, 20)
+
+ var wg sync.WaitGroup
+ for _, root := range ctxt.SrcDirs() {
+ root := root
+ wg.Add(1)
+ go func() {
+ allPackages(ctxt, sema, root, found)
+ wg.Done()
+ }()
+ }
+ wg.Wait()
+}
+
+func allPackages(ctxt *build.Context, sema chan bool, root string, found func(string, error)) {
+ root = filepath.Clean(root) + string(os.PathSeparator)
+
+ var wg sync.WaitGroup
+
+ var walkDir func(dir string)
+ walkDir = func(dir string) {
+ // Avoid .foo, _foo, and testdata directory trees.
+ base := filepath.Base(dir)
+ if base == "" || base[0] == '.' || base[0] == '_' || base == "testdata" {
+ return
+ }
+
+ pkg := filepath.ToSlash(strings.TrimPrefix(dir, root))
+
+ // Prune search if we encounter any of these import paths.
+ switch pkg {
+ case "builtin":
+ return
+ }
+
+ sema <- true
+ files, err := ReadDir(ctxt, dir)
+ <-sema
+ if pkg != "" || err != nil {
+ found(pkg, err)
+ }
+ for _, fi := range files {
+ fi := fi
+ if fi.IsDir() {
+ wg.Add(1)
+ go func() {
+ walkDir(filepath.Join(dir, fi.Name()))
+ wg.Done()
+ }()
+ }
+ }
+ }
+
+ walkDir(root)
+ wg.Wait()
+}
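A brief usage sketch for ForEachPackage from a hypothetical caller (not part of the patch): the callback may be invoked concurrently from several goroutines, so shared state must be guarded, just as AllPackages does internally; directories that exist but cannot be read arrive with a non-nil error.

package main

import (
    "fmt"
    "go/build"
    "sort"
    "sync"

    "llvm.org/llgo/third_party/go.tools/go/buildutil"
)

func main() {
    var mu sync.Mutex
    var pkgs []string
    unreadable := make(map[string]error)
    buildutil.ForEachPackage(&build.Default, func(path string, err error) {
        mu.Lock() // found runs concurrently; protect the shared slice and map
        defer mu.Unlock()
        if err != nil {
            unreadable[path] = err
            return
        }
        pkgs = append(pkgs, path)
    })
    sort.Strings(pkgs)
    fmt.Printf("%d packages, %d unreadable directories\n", len(pkgs), len(unreadable))
}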
diff --git a/llgo/third_party/go.tools/go/buildutil/allpackages_test.go b/llgo/third_party/go.tools/go/buildutil/allpackages_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..db6529e72eafb730694a292c18ab235c85cb7dac
--- /dev/null
+++ b/llgo/third_party/go.tools/go/buildutil/allpackages_test.go
@@ -0,0 +1,32 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package buildutil_test
+
+import (
+ "go/build"
+ "testing"
+
+ "llvm.org/llgo/third_party/go.tools/go/buildutil"
+)
+
+func TestAllPackages(t *testing.T) {
+ all := buildutil.AllPackages(&build.Default)
+
+ set := make(map[string]bool)
+ for _, pkg := range all {
+ set[pkg] = true
+ }
+
+ const wantAtLeast = 250
+ if len(all) < wantAtLeast {
+ t.Errorf("Found only %d packages, want at least %d", len(all), wantAtLeast)
+ }
+
+ for _, want := range []string{"fmt", "crypto/sha256", "llvm.org/llgo/third_party/go.tools/go/buildutil"} {
+ if !set[want] {
+ t.Errorf("Package %q not found; got %s", want, all)
+ }
+ }
+}
diff --git a/llgo/third_party/go.tools/go/buildutil/util.go b/llgo/third_party/go.tools/go/buildutil/util.go
new file mode 100644
index 0000000000000000000000000000000000000000..60eeae253030b4af92ead410edf0c44116951580
--- /dev/null
+++ b/llgo/third_party/go.tools/go/buildutil/util.go
@@ -0,0 +1,158 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package buildutil
+
+import (
+ "fmt"
+ "go/ast"
+ "go/build"
+ "go/parser"
+ "go/token"
+ "io"
+ "io/ioutil"
+ "os"
+ "path"
+ "path/filepath"
+ "strings"
+)
+
+// ParseFile behaves like parser.ParseFile,
+// but uses the build context's file system interface, if any.
+//
+// If file is not absolute (as defined by IsAbsPath), the (dir, file)
+// components are joined using JoinPath; dir must be absolute.
+//
+// The displayPath function, if provided, is used to transform the
+// filename that will be attached to the ASTs.
+//
+// TODO(adonovan): call this from go/loader.parseFiles when the tree thaws.
+//
+func ParseFile(fset *token.FileSet, ctxt *build.Context, displayPath func(string) string, dir string, file string, mode parser.Mode) (*ast.File, error) {
+ if !IsAbsPath(ctxt, file) {
+ file = JoinPath(ctxt, dir, file)
+ }
+ rd, err := OpenFile(ctxt, file)
+ if err != nil {
+ return nil, err
+ }
+ defer rd.Close() // ignore error
+ if displayPath != nil {
+ file = displayPath(file)
+ }
+ return parser.ParseFile(fset, file, rd, mode)
+}
+
+// ContainingPackage returns the package containing filename.
+//
+// If filename is not absolute, it is interpreted relative to working directory dir.
+// All I/O is via the build context's file system interface, if any.
+//
+// The '...Files []string' fields of the resulting build.Package are not
+// populated (build.FindOnly mode).
+//
+// TODO(adonovan): call this from oracle when the tree thaws.
+//
+func ContainingPackage(ctxt *build.Context, dir, filename string) (*build.Package, error) {
+ if !IsAbsPath(ctxt, filename) {
+ filename = JoinPath(ctxt, dir, filename)
+ }
+
+ // We must not assume the file tree uses
+ // "/" always,
+ // `\` always,
+ // or os.PathSeparator (which varies by platform),
+ // but to make any progress, we are forced to assume that
+ // paths will not use `\` unless the PathSeparator
+ // is also `\`, thus we can rely on filepath.ToSlash for some sanity.
+
+ dirSlash := path.Dir(filepath.ToSlash(filename)) + "/"
+
+ // We assume that no source root (GOPATH[i] or GOROOT) contains any other.
+ for _, srcdir := range ctxt.SrcDirs() {
+ srcdirSlash := filepath.ToSlash(srcdir) + "/"
+ if strings.HasPrefix(dirSlash, srcdirSlash) {
+ importPath := dirSlash[len(srcdirSlash) : len(dirSlash)-len("/")]
+ return ctxt.Import(importPath, dir, build.FindOnly)
+ }
+ }
+
+ return nil, fmt.Errorf("can't find package containing %s", filename)
+}
+
+// -- Effective methods of file system interface -------------------------
+
+// (go/build.Context defines these as methods, but does not export them.)
+
+// TODO(adonovan): HasSubdir?
+
+// FileExists returns true if the specified file exists,
+// using the build context's file system interface.
+func FileExists(ctxt *build.Context, path string) bool {
+ if ctxt.OpenFile != nil {
+ r, err := ctxt.OpenFile(path)
+ if err != nil {
+ return false
+ }
+ r.Close() // ignore error
+ return true
+ }
+ _, err := os.Stat(path)
+ return err == nil
+}
+
+// OpenFile behaves like os.Open,
+// but uses the build context's file system interface, if any.
+func OpenFile(ctxt *build.Context, path string) (io.ReadCloser, error) {
+ if ctxt.OpenFile != nil {
+ return ctxt.OpenFile(path)
+ }
+ return os.Open(path)
+}
+
+// IsAbsPath behaves like filepath.IsAbs,
+// but uses the build context's file system interface, if any.
+func IsAbsPath(ctxt *build.Context, path string) bool {
+ if ctxt.IsAbsPath != nil {
+ return ctxt.IsAbsPath(path)
+ }
+ return filepath.IsAbs(path)
+}
+
+// JoinPath behaves like filepath.Join,
+// but uses the build context's file system interface, if any.
+func JoinPath(ctxt *build.Context, path ...string) string {
+ if ctxt.JoinPath != nil {
+ return ctxt.JoinPath(path...)
+ }
+ return filepath.Join(path...)
+}
+
+// IsDir behaves like os.Stat plus IsDir,
+// but uses the build context's file system interface, if any.
+func IsDir(ctxt *build.Context, path string) bool {
+ if ctxt.IsDir != nil {
+ return ctxt.IsDir(path)
+ }
+ fi, err := os.Stat(path)
+ return err == nil && fi.IsDir()
+}
+
+// ReadDir behaves like ioutil.ReadDir,
+// but uses the build context's file system interface, if any.
+func ReadDir(ctxt *build.Context, path string) ([]os.FileInfo, error) {
+ if ctxt.ReadDir != nil {
+ return ctxt.ReadDir(path)
+ }
+ return ioutil.ReadDir(path)
+}
+
+// SplitPathList behaves like filepath.SplitList,
+// but uses the build context's file system interface, if any.
+func SplitPathList(ctxt *build.Context, s string) []string {
+ if ctxt.SplitPathList != nil {
+ return ctxt.SplitPathList(s)
+ }
+ return filepath.SplitList(s)
+}
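A hedged sketch of the virtualization these wrappers make possible (the virtual path and file contents are invented, and a Unix-style path separator is assumed): overriding only OpenFile on a copy of build.Default lets FileExists and ParseFile read source that exists nowhere on disk.

package main

import (
    "fmt"
    "go/build"
    "go/token"
    "io"
    "io/ioutil"
    "os"
    "strings"

    "llvm.org/llgo/third_party/go.tools/go/buildutil"
)

func main() {
    ctxt := build.Default // copy; build.Default itself is left untouched
    files := map[string]string{
        "/virtual/src/hello/hello.go": "package hello\n\nfunc Hello() string { return \"hi\" }\n",
    }
    ctxt.OpenFile = func(path string) (io.ReadCloser, error) {
        if src, ok := files[path]; ok {
            return ioutil.NopCloser(strings.NewReader(src)), nil
        }
        return nil, os.ErrNotExist
    }

    fmt.Println(buildutil.FileExists(&ctxt, "/virtual/src/hello/hello.go")) // true

    fset := token.NewFileSet()
    f, err := buildutil.ParseFile(fset, &ctxt, nil, "/virtual/src/hello", "hello.go", 0)
    if err != nil {
        panic(err)
    }
    fmt.Println(f.Name.Name) // "hello"
}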
diff --git a/llgo/third_party/go.tools/go/buildutil/util_test.go b/llgo/third_party/go.tools/go/buildutil/util_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..586bd611f4e8b31f8e5d3abf8c1216d7e829bf32
--- /dev/null
+++ b/llgo/third_party/go.tools/go/buildutil/util_test.go
@@ -0,0 +1,41 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package buildutil_test
+
+import (
+ "go/build"
+ "os"
+ "path/filepath"
+ "runtime"
+ "testing"
+
+ "llvm.org/llgo/third_party/go.tools/go/buildutil"
+)
+
+func TestContainingPackage(t *testing.T) {
+ // unvirtualized:
+ goroot := runtime.GOROOT()
+ gopath := filepath.SplitList(os.Getenv("GOPATH"))[0]
+
+ for _, test := range [][2]string{
+ {goroot + "/src/fmt/print.go", "fmt"},
+ {goroot + "/src/encoding/json/foo.go", "encoding/json"},
+ {goroot + "/src/encoding/missing/foo.go", "(not found)"},
+ {gopath + "/src/golang.org/x/tools/go/buildutil/util_test.go",
+ "llvm.org/llgo/third_party/go.tools/go/buildutil"},
+ } {
+ file, want := test[0], test[1]
+ bp, err := buildutil.ContainingPackage(&build.Default, ".", file)
+ got := bp.ImportPath
+ if err != nil {
+ got = "(not found)"
+ }
+ if got != want {
+ t.Errorf("ContainingPackage(%q) = %s, want %s", file, got, want)
+ }
+ }
+
+ // TODO(adonovan): test on virtualized GOPATH too.
+}
diff --git a/llgo/third_party/go.tools/go/callgraph/callgraph.go b/llgo/third_party/go.tools/go/callgraph/callgraph.go
new file mode 100644
index 0000000000000000000000000000000000000000..52a983658357ac63da7ab42ccaebcb465c9e968d
--- /dev/null
+++ b/llgo/third_party/go.tools/go/callgraph/callgraph.go
@@ -0,0 +1,123 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+
+Package callgraph defines the call graph and various algorithms
+and utilities to operate on it.
+
+A call graph is a labelled directed graph whose nodes represent
+functions and whose edge labels represent syntactic function call
+sites. The presence of a labelled edge (caller, site, callee)
+indicates that caller may call callee at the specified call site.
+
+A call graph is a multigraph: it may contain multiple edges (caller,
+*, callee) connecting the same pair of nodes, so long as the edges
+differ by label; this occurs when one function calls another function
+from multiple call sites. Also, it may contain multiple edges
+(caller, site, *) that differ only by callee; this indicates a
+polymorphic call.
+
+A SOUND call graph is one that overapproximates the dynamic calling
+behaviors of the program in all possible executions. One call graph
+is more PRECISE than another if it is a smaller overapproximation of
+the dynamic behavior.
+
+All call graphs have a synthetic root node which is responsible for
+calling main() and init().
+
+Calls to built-in functions (e.g. panic, println) are not represented
+in the call graph; they are treated like built-in operators of the
+language.
+
+*/
+package callgraph
+
+// TODO(adonovan): add a function to eliminate wrappers from the
+// callgraph, preserving topology.
+// More generally, we could eliminate "uninteresting" nodes such as
+// nodes from packages we don't care about.
+
+import (
+ "fmt"
+ "go/token"
+
+ "llvm.org/llgo/third_party/go.tools/go/ssa"
+)
+
+// A Graph represents a call graph.
+//
+// A graph may contain nodes that are not reachable from the root.
+// If the call graph is sound, such nodes indicate unreachable
+// functions.
+//
+type Graph struct {
+ Root *Node // the distinguished root node
+ Nodes map[*ssa.Function]*Node // all nodes by function
+}
+
+// New returns a new Graph with the specified root node.
+func New(root *ssa.Function) *Graph {
+ g := &Graph{Nodes: make(map[*ssa.Function]*Node)}
+ g.Root = g.CreateNode(root)
+ return g
+}
+
+// CreateNode returns the Node for fn, creating it if not present.
+func (g *Graph) CreateNode(fn *ssa.Function) *Node {
+ n, ok := g.Nodes[fn]
+ if !ok {
+ n = &Node{Func: fn, ID: len(g.Nodes)}
+ g.Nodes[fn] = n
+ }
+ return n
+}
+
+// A Node represents a node in a call graph.
+type Node struct {
+ Func *ssa.Function // the function this node represents
+ ID int // 0-based sequence number
+ In []*Edge // unordered set of incoming call edges (n.In[*].Callee == n)
+ Out []*Edge // unordered set of outgoing call edges (n.Out[*].Caller == n)
+}
+
+func (n *Node) String() string {
+ return fmt.Sprintf("n%d:%s", n.ID, n.Func)
+}
+
+// A Edge represents an edge in the call graph.
+//
+// Site is nil for edges originating in synthetic or intrinsic
+// functions, e.g. reflect.Call or the root of the call graph.
+type Edge struct {
+ Caller *Node
+ Site ssa.CallInstruction
+ Callee *Node
+}
+
+func (e Edge) String() string {
+ return fmt.Sprintf("%s --> %s", e.Caller, e.Callee)
+}
+
+func (e Edge) Description() string {
+ if e.Site == nil {
+ return "synthetic call"
+ }
+ return e.Site.Common().Description()
+}
+
+func (e Edge) Pos() token.Pos {
+ if e.Site == nil {
+ return token.NoPos
+ }
+ return e.Site.Pos()
+}
+
+// AddEdge adds the edge (caller, site, callee) to the call graph.
+// Elimination of duplicate edges is the caller's responsibility.
+func AddEdge(caller *Node, site ssa.CallInstruction, callee *Node) {
+ e := &Edge{caller, site, callee}
+ callee.In = append(callee.In, e)
+ caller.Out = append(caller.Out, e)
+}
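A small sketch of the core API (not from the patch): fnMain and fnHelper stand for *ssa.Function values obtained from an ssa.Program built elsewhere, and GraphVisitEdges is the traversal helper defined in callgraph/util.go later in this patch. A nil call site marks a synthetic edge, per the Edge documentation.

package example

import (
    "fmt"

    "llvm.org/llgo/third_party/go.tools/go/callgraph"
    "llvm.org/llgo/third_party/go.tools/go/ssa"
)

func buildTinyGraph(fnMain, fnHelper *ssa.Function) *callgraph.Graph {
    g := callgraph.New(fnMain)             // creates the distinguished root node
    helper := g.CreateNode(fnHelper)       // idempotent: reuses the node if it exists
    callgraph.AddEdge(g.Root, nil, helper) // nil site: a synthetic call

    callgraph.GraphVisitEdges(g, func(e *callgraph.Edge) error {
        fmt.Printf("%s (%s)\n", e, e.Description()) // e.g. "n0:... --> n1:... (synthetic call)"
        return nil
    })
    return g
}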
diff --git a/llgo/third_party/go.tools/go/callgraph/rta/rta.go b/llgo/third_party/go.tools/go/callgraph/rta/rta.go
new file mode 100644
index 0000000000000000000000000000000000000000..1427db238c570ae9b312513cfa315db252f575f3
--- /dev/null
+++ b/llgo/third_party/go.tools/go/callgraph/rta/rta.go
@@ -0,0 +1,459 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This package provides Rapid Type Analysis (RTA) for Go, a fast
+// algorithm for call graph construction and discovery of reachable code
+// (and hence dead code) and runtime types. The algorithm was first
+// described in:
+//
+// David F. Bacon and Peter F. Sweeney. 1996.
+// Fast static analysis of C++ virtual function calls. (OOPSLA '96)
+// http://doi.acm.org/10.1145/236337.236371
+//
+// The algorithm uses dynamic programming to tabulate the cross-product
+// of the set of known "address taken" functions with the set of known
+// dynamic calls of the same type. As each new address-taken function
+// is discovered, call graph edges are added from each known callsite,
+// and as each new call site is discovered, call graph edges are added
+// from it to each known address-taken function.
+//
+// A similar approach is used for dynamic calls via interfaces: it
+// tabulates the cross-product of the set of known "runtime types",
+// i.e. types that may appear in an interface value, or be derived from
+// one via reflection, with the set of known "invoke"-mode dynamic
+// calls. As each new "runtime type" is discovered, call edges are
+// added from the known call sites, and as each new call site is
+// discovered, call graph edges are added to each compatible
+// method.
+//
+// In addition, we must consider all exported methods of any runtime type
+// as reachable, since they may be called via reflection.
+//
+// Each time a newly added call edge causes a new function to become
+// reachable, the code of that function is analyzed for more call sites,
+// address-taken functions, and runtime types. The process continues
+// until a fixed point is achieved.
+//
+// The resulting call graph is less precise than one produced by pointer
+// analysis, but the algorithm is much faster. For example, running the
+// cmd/callgraph tool on its own source takes ~2.1s for RTA and ~5.4s
+// for points-to analysis.
+//
+package rta
+
+// TODO(adonovan): test it by connecting it to the interpreter and
+// replacing all "unreachable" functions by a special intrinsic, and
+// ensure that that intrinsic is never called.
+
+import (
+ "fmt"
+
+ "llvm.org/llgo/third_party/go.tools/go/callgraph"
+ "llvm.org/llgo/third_party/go.tools/go/ssa"
+ "llvm.org/llgo/third_party/go.tools/go/types"
+ "llvm.org/llgo/third_party/go.tools/go/types/typeutil"
+)
+
+// A Result holds the results of Rapid Type Analysis, which includes the
+// set of reachable functions/methods, runtime types, and the call graph.
+//
+type Result struct {
+ // CallGraph is the discovered callgraph.
+ // It does not include edges for calls made via reflection.
+ CallGraph *callgraph.Graph
+
+ // Reachable contains the set of reachable functions and methods.
+ // This includes exported methods of runtime types, since
+ // they may be accessed via reflection.
+ // The value indicates whether the function is address-taken.
+ //
+ // (We wrap the bool in a struct to avoid inadvertent use of
+ // "if Reachable[f] {" to test for set membership.)
+ Reachable map[*ssa.Function]struct{ AddrTaken bool }
+
+ // RuntimeTypes contains the set of types that are needed at
+ // runtime, for interfaces or reflection.
+ //
+ // The value indicates whether the type is inaccessible to reflection.
+ // Consider:
+ // type A struct{B}
+ // fmt.Println(new(A))
+ // Types *A, A and B are accessible to reflection, but the unnamed
+ // type struct{B} is not.
+ RuntimeTypes typeutil.Map
+}
+
+// Working state of the RTA algorithm.
+type rta struct {
+ result *Result
+
+ prog *ssa.Program
+
+ worklist []*ssa.Function // list of functions to visit
+
+ // addrTakenFuncsBySig contains all address-taken *Functions, grouped by signature.
+ // Keys are *types.Signature, values are map[*ssa.Function]bool sets.
+ addrTakenFuncsBySig typeutil.Map
+
+ // dynCallSites contains all dynamic "call"-mode call sites, grouped by signature.
+ // Keys are *types.Signature, values are unordered []ssa.CallInstruction.
+ dynCallSites typeutil.Map
+
+ // invokeSites contains all "invoke"-mode call sites, grouped by interface.
+ // Keys are *types.Interface (never *types.Named),
+ // Values are unordered []ssa.CallInstruction sets.
+ invokeSites typeutil.Map
+
+ // The following two maps together define the subset of the
+ // m:n "implements" relation needed by the algorithm.
+
+ // concreteTypes maps each concrete type to the set of interfaces that it implements.
+ // Keys are types.Type, values are unordered []*types.Interface.
+ // Only concrete types used as MakeInterface operands are included.
+ concreteTypes typeutil.Map
+
+ // interfaceTypes maps each interface type to
+ // the set of concrete types that implement it.
+ // Keys are *types.Interface, values are unordered []types.Type.
+ // Only interfaces used in "invoke"-mode CallInstructions are included.
+ interfaceTypes typeutil.Map
+}
+
+// addReachable marks a function as potentially callable at run-time,
+// and ensures that it gets processed.
+func (r *rta) addReachable(f *ssa.Function, addrTaken bool) {
+ reachable := r.result.Reachable
+ n := len(reachable)
+ v := reachable[f]
+ if addrTaken {
+ v.AddrTaken = true
+ }
+ reachable[f] = v
+ if len(reachable) > n {
+ // First time seeing f. Add it to the worklist.
+ r.worklist = append(r.worklist, f)
+ }
+}
+
+// addEdge adds the specified call graph edge, and marks it reachable.
+// addrTaken indicates whether to mark the callee as "address-taken".
+func (r *rta) addEdge(site ssa.CallInstruction, callee *ssa.Function, addrTaken bool) {
+ r.addReachable(callee, addrTaken)
+
+ if g := r.result.CallGraph; g != nil {
+ if site.Parent() == nil {
+ panic(site)
+ }
+ from := g.CreateNode(site.Parent())
+ to := g.CreateNode(callee)
+ callgraph.AddEdge(from, site, to)
+ }
+}
+
+// ---------- addrTakenFuncs × dynCallSites ----------
+
+// visitAddrTakenFunc is called each time we encounter an address-taken function f.
+func (r *rta) visitAddrTakenFunc(f *ssa.Function) {
+ // Create two-level map (Signature -> Function -> bool).
+ S := f.Signature
+ funcs, _ := r.addrTakenFuncsBySig.At(S).(map[*ssa.Function]bool)
+ if funcs == nil {
+ funcs = make(map[*ssa.Function]bool)
+ r.addrTakenFuncsBySig.Set(S, funcs)
+ }
+ if !funcs[f] {
+ // First time seeing f.
+ funcs[f] = true
+
+ // If we've seen any dyncalls of this type, mark it reachable,
+ // and add call graph edges.
+ sites, _ := r.dynCallSites.At(S).([]ssa.CallInstruction)
+ for _, site := range sites {
+ r.addEdge(site, f, true)
+ }
+ }
+}
+
+// visitDynCall is called each time we encounter a dynamic "call"-mode call.
+func (r *rta) visitDynCall(site ssa.CallInstruction) {
+ S := site.Common().Signature()
+
+ // Record the call site.
+ sites, _ := r.dynCallSites.At(S).([]ssa.CallInstruction)
+ r.dynCallSites.Set(S, append(sites, site))
+
+ // For each function of signature S that we know is address-taken,
+ // mark it reachable. We'll add the callgraph edges later.
+ funcs, _ := r.addrTakenFuncsBySig.At(S).(map[*ssa.Function]bool)
+ for g := range funcs {
+ r.addEdge(site, g, true)
+ }
+}
+
+// ---------- concrete types × invoke sites ----------
+
+// addInvokeEdge is called for each new pair (site, C) in the matrix.
+func (r *rta) addInvokeEdge(site ssa.CallInstruction, C types.Type) {
+ // Ascertain the concrete method of C to be called.
+ imethod := site.Common().Method
+ cmethod := r.prog.Method(r.prog.MethodSets.MethodSet(C).Lookup(imethod.Pkg(), imethod.Name()))
+ r.addEdge(site, cmethod, true)
+}
+
+// visitInvoke is called each time the algorithm encounters an "invoke"-mode call.
+func (r *rta) visitInvoke(site ssa.CallInstruction) {
+ I := site.Common().Value.Type().Underlying().(*types.Interface)
+
+ // Record the invoke site.
+ sites, _ := r.invokeSites.At(I).([]ssa.CallInstruction)
+ r.invokeSites.Set(I, append(sites, site))
+
+ // Add callgraph edge for each existing
+ // address-taken concrete type implementing I.
+ for _, C := range r.implementations(I) {
+ r.addInvokeEdge(site, C)
+ }
+}
+
+// ---------- main algorithm ----------
+
+// visitFunc processes function f.
+func (r *rta) visitFunc(f *ssa.Function) {
+ var space [32]*ssa.Value // preallocate space for common case
+
+ for _, b := range f.Blocks {
+ for _, instr := range b.Instrs {
+ rands := instr.Operands(space[:0])
+
+ switch instr := instr.(type) {
+ case ssa.CallInstruction:
+ call := instr.Common()
+ if call.IsInvoke() {
+ r.visitInvoke(instr)
+ } else if g := call.StaticCallee(); g != nil {
+ r.addEdge(instr, g, false)
+ } else if _, ok := call.Value.(*ssa.Builtin); !ok {
+ r.visitDynCall(instr)
+ }
+
+ // Ignore the call-position operand when
+ // looking for address-taken Functions.
+ // Hack: assume this is rands[0].
+ rands = rands[1:]
+
+ case *ssa.MakeInterface:
+ r.addRuntimeType(instr.X.Type(), false)
+ }
+
+ // Process all address-taken functions.
+ for _, op := range rands {
+ if g, ok := (*op).(*ssa.Function); ok {
+ r.visitAddrTakenFunc(g)
+ }
+ }
+ }
+ }
+}
+
+// Analyze performs Rapid Type Analysis, starting at the specified root
+// functions. It returns nil if no roots were specified.
+//
+// If buildCallGraph is true, Result.CallGraph will contain a call
+// graph; otherwise, only the other fields (reachable functions) are
+// populated.
+//
+func Analyze(roots []*ssa.Function, buildCallGraph bool) *Result {
+ if len(roots) == 0 {
+ return nil
+ }
+
+ r := &rta{
+ result: &Result{Reachable: make(map[*ssa.Function]struct{ AddrTaken bool })},
+ prog: roots[0].Prog,
+ }
+
+ if buildCallGraph {
+ // TODO(adonovan): change callgraph API to eliminate the
+ // notion of a distinguished root node. Some callgraphs
+ // have many roots, or none.
+ r.result.CallGraph = callgraph.New(roots[0])
+ }
+
+ hasher := typeutil.MakeHasher()
+ r.result.RuntimeTypes.SetHasher(hasher)
+ r.addrTakenFuncsBySig.SetHasher(hasher)
+ r.dynCallSites.SetHasher(hasher)
+ r.invokeSites.SetHasher(hasher)
+ r.concreteTypes.SetHasher(hasher)
+ r.interfaceTypes.SetHasher(hasher)
+
+ // Visit functions, processing their instructions, and adding
+ // new functions to the worklist, until a fixed point is
+ // reached.
+ var shadow []*ssa.Function // for efficiency, we double-buffer the worklist
+ r.worklist = append(r.worklist, roots...)
+ for len(r.worklist) > 0 {
+ shadow, r.worklist = r.worklist, shadow[:0]
+ for _, f := range shadow {
+ r.visitFunc(f)
+ }
+ }
+ return r.result
+}
+
+// interfaces(C) returns all currently known interfaces implemented by C.
+func (r *rta) interfaces(C types.Type) []*types.Interface {
+ // Ascertain set of interfaces C implements
+ // and update 'implements' relation.
+ var ifaces []*types.Interface
+ r.interfaceTypes.Iterate(func(I types.Type, concs interface{}) {
+ if I := I.(*types.Interface); types.Implements(C, I) {
+ concs, _ := concs.([]types.Type)
+ r.interfaceTypes.Set(I, append(concs, C))
+ ifaces = append(ifaces, I)
+ }
+ })
+ r.concreteTypes.Set(C, ifaces)
+ return ifaces
+}
+
+// implementations(I) returns all currently known concrete types that implement I.
+func (r *rta) implementations(I *types.Interface) []types.Type {
+ var concs []types.Type
+ if v := r.interfaceTypes.At(I); v != nil {
+ concs = v.([]types.Type)
+ } else {
+ // First time seeing this interface.
+ // Update the 'implements' relation.
+ r.concreteTypes.Iterate(func(C types.Type, ifaces interface{}) {
+ if types.Implements(C, I) {
+ ifaces, _ := ifaces.([]*types.Interface)
+ r.concreteTypes.Set(C, append(ifaces, I))
+ concs = append(concs, C)
+ }
+ })
+ r.interfaceTypes.Set(I, concs)
+ }
+ return concs
+}
+
+// addRuntimeType is called for each concrete type that can be the
+// dynamic type of some interface or reflect.Value.
+// Adapted from needMethods in go/ssa/builder.go
+//
+func (r *rta) addRuntimeType(T types.Type, skip bool) {
+ if prev, ok := r.result.RuntimeTypes.At(T).(bool); ok {
+ if skip && !prev {
+ r.result.RuntimeTypes.Set(T, skip)
+ }
+ return
+ }
+ r.result.RuntimeTypes.Set(T, skip)
+
+ mset := r.prog.MethodSets.MethodSet(T)
+
+ if _, ok := T.Underlying().(*types.Interface); !ok {
+ // T is a new concrete type.
+ for i, n := 0, mset.Len(); i < n; i++ {
+ sel := mset.At(i)
+ m := sel.Obj()
+
+ if m.Exported() {
+ // Exported methods are always potentially callable via reflection.
+ r.addReachable(r.prog.Method(sel), true)
+ }
+ }
+
+ // Add callgraph edge for each existing dynamic
+ // "invoke"-mode call via that interface.
+ for _, I := range r.interfaces(T) {
+ sites, _ := r.invokeSites.At(I).([]ssa.CallInstruction)
+ for _, site := range sites {
+ r.addInvokeEdge(site, T)
+ }
+ }
+ }
+
+ // Precondition: T is not a method signature (*Signature with Recv()!=nil).
+ // Recursive case: skip => don't call makeMethods(T).
+ // Each package maintains its own set of types it has visited.
+
+ var n *types.Named
+ switch T := T.(type) {
+ case *types.Named:
+ n = T
+ case *types.Pointer:
+ n, _ = T.Elem().(*types.Named)
+ }
+ if n != nil {
+ owner := n.Obj().Pkg()
+ if owner == nil {
+ return // built-in error type
+ }
+ }
+
+ // Recursion over signatures of each exported method.
+ for i := 0; i < mset.Len(); i++ {
+ if mset.At(i).Obj().Exported() {
+ sig := mset.At(i).Type().(*types.Signature)
+ r.addRuntimeType(sig.Params(), true) // skip the Tuple itself
+ r.addRuntimeType(sig.Results(), true) // skip the Tuple itself
+ }
+ }
+
+ switch t := T.(type) {
+ case *types.Basic:
+ // nop
+
+ case *types.Interface:
+ // nop---handled by recursion over method set.
+
+ case *types.Pointer:
+ r.addRuntimeType(t.Elem(), false)
+
+ case *types.Slice:
+ r.addRuntimeType(t.Elem(), false)
+
+ case *types.Chan:
+ r.addRuntimeType(t.Elem(), false)
+
+ case *types.Map:
+ r.addRuntimeType(t.Key(), false)
+ r.addRuntimeType(t.Elem(), false)
+
+ case *types.Signature:
+ if t.Recv() != nil {
+ panic(fmt.Sprintf("Signature %s has Recv %s", t, t.Recv()))
+ }
+ r.addRuntimeType(t.Params(), true) // skip the Tuple itself
+ r.addRuntimeType(t.Results(), true) // skip the Tuple itself
+
+ case *types.Named:
+ // A pointer-to-named type can be derived from a named
+ // type via reflection. It may have methods too.
+ r.addRuntimeType(types.NewPointer(T), false)
+
+ // Consider 'type T struct{S}' where S has methods.
+ // Reflection provides no way to get from T to struct{S},
+ // only to S, so the method set of struct{S} is unwanted,
+ // so set 'skip' flag during recursion.
+ r.addRuntimeType(t.Underlying(), true)
+
+ case *types.Array:
+ r.addRuntimeType(t.Elem(), false)
+
+ case *types.Struct:
+ for i, n := 0, t.NumFields(); i < n; i++ {
+ r.addRuntimeType(t.Field(i).Type(), false)
+ }
+
+ case *types.Tuple:
+ for i, n := 0, t.Len(); i < n; i++ {
+ r.addRuntimeType(t.At(i).Type(), false)
+ }
+
+ default:
+ panic(T)
+ }
+}
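A condensed usage sketch, mirroring the test file that follows (the source file name is a placeholder, and the loader calls assume the go.tools loader API in this snapshot, including CreateFromFilenames and SourceImports): build SSA for a program and hand its root functions to Analyze.

package main

import (
    "fmt"

    "llvm.org/llgo/third_party/go.tools/go/callgraph"
    "llvm.org/llgo/third_party/go.tools/go/callgraph/rta"
    "llvm.org/llgo/third_party/go.tools/go/loader"
    "llvm.org/llgo/third_party/go.tools/go/ssa"
)

func main() {
    conf := loader.Config{SourceImports: true}
    conf.CreateFromFilenames("main", "hello.go") // placeholder file name
    iprog, err := conf.Load()
    if err != nil {
        panic(err)
    }
    prog := ssa.Create(iprog, 0)
    mainPkg := prog.Package(iprog.Created[0].Pkg)
    prog.BuildAll()

    // Roots: main and the package initializer, as in TestRTA below.
    res := rta.Analyze([]*ssa.Function{mainPkg.Func("main"), mainPkg.Func("init")}, true)

    fmt.Println(len(res.Reachable), "reachable functions")
    callgraph.GraphVisitEdges(res.CallGraph, func(e *callgraph.Edge) error {
        fmt.Println(e) // caller --> callee
        return nil
    })
}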
diff --git a/llgo/third_party/go.tools/go/callgraph/rta/rta_test.go b/llgo/third_party/go.tools/go/callgraph/rta/rta_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..bbfa0111915492bb825598954010dab9d581ece8
--- /dev/null
+++ b/llgo/third_party/go.tools/go/callgraph/rta/rta_test.go
@@ -0,0 +1,135 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package rta_test
+
+import (
+ "bytes"
+ "fmt"
+ "go/ast"
+ "go/parser"
+ "go/token"
+ "io/ioutil"
+ "sort"
+ "strings"
+ "testing"
+
+ "llvm.org/llgo/third_party/go.tools/go/callgraph"
+ "llvm.org/llgo/third_party/go.tools/go/callgraph/rta"
+ "llvm.org/llgo/third_party/go.tools/go/loader"
+ "llvm.org/llgo/third_party/go.tools/go/ssa"
+ "llvm.org/llgo/third_party/go.tools/go/types"
+)
+
+var inputs = []string{
+ "testdata/func.go",
+ "testdata/rtype.go",
+ "testdata/iface.go",
+}
+
+func expectation(f *ast.File) (string, token.Pos) {
+ for _, c := range f.Comments {
+ text := strings.TrimSpace(c.Text())
+ if t := strings.TrimPrefix(text, "WANT:\n"); t != text {
+ return t, c.Pos()
+ }
+ }
+ return "", token.NoPos
+}
+
+// TestRTA runs RTA on each file in inputs, prints the results, and
+// compares it with the golden results embedded in the WANT comment at
+// the end of the file.
+//
+// The results string consists of two parts: the set of dynamic call
+// edges, "f --> g", one per line, and the set of reachable functions,
+// one per line. Each set is sorted.
+//
+func TestRTA(t *testing.T) {
+ for _, filename := range inputs {
+ content, err := ioutil.ReadFile(filename)
+ if err != nil {
+ t.Errorf("couldn't read file '%s': %s", filename, err)
+ continue
+ }
+
+ conf := loader.Config{
+ SourceImports: true,
+ ParserMode: parser.ParseComments,
+ }
+ f, err := conf.ParseFile(filename, content)
+ if err != nil {
+ t.Error(err)
+ continue
+ }
+
+ want, pos := expectation(f)
+ if pos == token.NoPos {
+ t.Errorf("No WANT: comment in %s", filename)
+ continue
+ }
+
+ conf.CreateFromFiles("main", f)
+ iprog, err := conf.Load()
+ if err != nil {
+ t.Error(err)
+ continue
+ }
+
+ prog := ssa.Create(iprog, 0)
+ mainPkg := prog.Package(iprog.Created[0].Pkg)
+ prog.BuildAll()
+
+ res := rta.Analyze([]*ssa.Function{
+ mainPkg.Func("main"),
+ mainPkg.Func("init"),
+ }, true)
+
+ if got := printResult(res, mainPkg.Object); got != want {
+ t.Errorf("%s: got:\n%s\nwant:\n%s",
+ prog.Fset.Position(pos), got, want)
+ }
+ }
+}
+
+func printResult(res *rta.Result, from *types.Package) string {
+ var buf bytes.Buffer
+
+ writeSorted := func(ss []string) {
+ sort.Strings(ss)
+ for _, s := range ss {
+ fmt.Fprintf(&buf, " %s\n", s)
+ }
+ }
+
+ buf.WriteString("Dynamic calls\n")
+ var edges []string
+ callgraph.GraphVisitEdges(res.CallGraph, func(e *callgraph.Edge) error {
+ if strings.Contains(e.Description(), "dynamic") {
+ edges = append(edges, fmt.Sprintf("%s --> %s",
+ e.Caller.Func.RelString(from),
+ e.Callee.Func.RelString(from)))
+ }
+ return nil
+ })
+ writeSorted(edges)
+
+ buf.WriteString("Reachable functions\n")
+ var reachable []string
+ for f := range res.Reachable {
+ reachable = append(reachable, f.RelString(from))
+ }
+ writeSorted(reachable)
+
+ buf.WriteString("Reflect types\n")
+ var rtypes []string
+ res.RuntimeTypes.Iterate(func(key types.Type, value interface{}) {
+ if value == false { // accessible to reflection
+ rtypes = append(rtypes, types.TypeString(from, key))
+ }
+ })
+ writeSorted(rtypes)
+
+ return strings.TrimSpace(buf.String())
+}
diff --git a/llgo/third_party/go.tools/go/callgraph/rta/testdata/func.go b/llgo/third_party/go.tools/go/callgraph/rta/testdata/func.go
new file mode 100644
index 0000000000000000000000000000000000000000..968c73d80e956bcc98fa42732c898527f92fb55c
--- /dev/null
+++ b/llgo/third_party/go.tools/go/callgraph/rta/testdata/func.go
@@ -0,0 +1,37 @@
+//+build ignore
+
+package main
+
+// Test of dynamic function calls.
+// No interfaces, so no runtime/reflect types.
+
+func A1() {
+ A2(0)
+}
+
+func A2(int) {} // not address-taken
+
+func B() {} // unreachable
+
+var (
+ C = func(int) {}
+ D = func(int) {}
+)
+
+func main() {
+ A1()
+
+ pfn := C
+ pfn(0) // calls C and D but not A2 (same sig but not address-taken)
+}
+
+// WANT:
+// Dynamic calls
+// main --> init$1
+// main --> init$2
+// Reachable functions
+// A1
+// A2
+// init$1
+// init$2
+// Reflect types
diff --git a/llgo/third_party/go.tools/go/callgraph/rta/testdata/iface.go b/llgo/third_party/go.tools/go/callgraph/rta/testdata/iface.go
new file mode 100644
index 0000000000000000000000000000000000000000..c3ee57049f107a47ae85001eb524d9077789ae89
--- /dev/null
+++ b/llgo/third_party/go.tools/go/callgraph/rta/testdata/iface.go
@@ -0,0 +1,79 @@
+//+build ignore
+
+package main
+
+// Test of interface calls.
+
+func use(interface{})
+
+type A byte // instantiated but not a reflect type
+
+func (A) f() {} // called directly
+func (A) F() {} // unreachable
+
+type B int // a reflect type
+
+func (*B) f() {} // reachable via interface invoke
+func (*B) F() {} // reachable: exported method of reflect type
+
+type B2 int // a reflect type, and *B2 also
+
+func (B2) f() {} // reachable via interface invoke
+func (B2) g() {} // reachable: exported method of reflect type
+
+type C string // not instantiated
+
+func (C) f() {} // unreachable
+func (C) F() {} // unreachable
+
+type D uint // instantiated only in dead code
+
+func (D) f() {} // unreachable
+func (D) F() {} // unreachable
+
+func main() {
+ A(0).f()
+
+ use(new(B))
+ use(B2(0))
+
+ var i interface {
+ f()
+ }
+ i.f() // calls (*B).f, (*B2).f and (B2).f
+
+ live()
+}
+
+func live() {
+ var j interface {
+ f()
+ g()
+ }
+ j.f() // calls (B2).f and (*B2).f but not (*B).f (no g method).
+}
+
+func dead() {
+ use(D(0))
+}
+
+// WANT:
+// Dynamic calls
+// live --> (*B2).f
+// live --> (B2).f
+// main --> (*B).f
+// main --> (*B2).f
+// main --> (B2).f
+// Reachable functions
+// (*B).F
+// (*B).f
+// (*B2).f
+// (A).f
+// (B2).f
+// live
+// use
+// Reflect types
+// *B
+// *B2
+// B
+// B2
diff --git a/llgo/third_party/go.tools/go/callgraph/rta/testdata/rtype.go b/llgo/third_party/go.tools/go/callgraph/rta/testdata/rtype.go
new file mode 100644
index 0000000000000000000000000000000000000000..85414e55303f5e7b5e8c808f0c016db7aca16873
--- /dev/null
+++ b/llgo/third_party/go.tools/go/callgraph/rta/testdata/rtype.go
@@ -0,0 +1,35 @@
+//+build ignore
+
+package main
+
+// Test of runtime types (types for which descriptors are needed).
+
+func use(interface{})
+
+type A byte // neither A nor byte are runtime types
+
+type B struct{ x uint } // B and uint are runtime types, but not the struct
+
+func main() {
+ var x int // not a runtime type
+ print(x)
+
+ var y string // runtime type due to interface conversion
+ use(y)
+
+ use(struct{ uint64 }{}) // struct is a runtime type
+
+ use(new(B)) // *B is a runtime type
+}
+
+// WANT:
+// Dynamic calls
+// Reachable functions
+// use
+// Reflect types
+// *B
+// B
+// string
+// struct{uint64}
+// uint
+// uint64
diff --git a/llgo/third_party/go.tools/go/callgraph/util.go b/llgo/third_party/go.tools/go/callgraph/util.go
new file mode 100644
index 0000000000000000000000000000000000000000..1d3448e6ed1109883bf02547410bd7cddc45ffe3
--- /dev/null
+++ b/llgo/third_party/go.tools/go/callgraph/util.go
@@ -0,0 +1,181 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package callgraph
+
+import "llvm.org/llgo/third_party/go.tools/go/ssa"
+
+// This file provides various utilities over call graphs, such as
+// visitation and path search.
+
+// CalleesOf returns a new set containing all direct callees of the
+// caller node.
+//
+func CalleesOf(caller *Node) map[*Node]bool {
+ callees := make(map[*Node]bool)
+ for _, e := range caller.Out {
+ callees[e.Callee] = true
+ }
+ return callees
+}
+
+// GraphVisitEdges visits all the edges in graph g in depth-first order.
+// The edge function is called for each edge in postorder. If it
+// returns non-nil, visitation stops and GraphVisitEdges returns that
+// value.
+//
+func GraphVisitEdges(g *Graph, edge func(*Edge) error) error {
+ seen := make(map[*Node]bool)
+ var visit func(n *Node) error
+ visit = func(n *Node) error {
+ if !seen[n] {
+ seen[n] = true
+ for _, e := range n.Out {
+ if err := visit(e.Callee); err != nil {
+ return err
+ }
+ if err := edge(e); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+ }
+ for _, n := range g.Nodes {
+ if err := visit(n); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// PathSearch finds an arbitrary path starting at node start and
+// ending at some node for which isEnd() returns true. On success,
+// PathSearch returns the path as an ordered list of edges; on
+// failure, it returns nil.
+//
+func PathSearch(start *Node, isEnd func(*Node) bool) []*Edge {
+ stack := make([]*Edge, 0, 32)
+ seen := make(map[*Node]bool)
+ var search func(n *Node) []*Edge
+ search = func(n *Node) []*Edge {
+ if !seen[n] {
+ seen[n] = true
+ if isEnd(n) {
+ return stack
+ }
+ for _, e := range n.Out {
+ stack = append(stack, e) // push
+ if found := search(e.Callee); found != nil {
+ return found
+ }
+ stack = stack[:len(stack)-1] // pop
+ }
+ }
+ return nil
+ }
+ return search(start)
+}
+
+// DeleteSyntheticNodes removes from call graph g all nodes for
+// synthetic functions (except g.Root and package initializers),
+// preserving the topology. In effect, calls to synthetic wrappers
+// are "inlined".
+//
+func (g *Graph) DeleteSyntheticNodes() {
+ // Measurements on the standard library and go.tools show that
+ // resulting graph has ~15% fewer nodes and 4-8% fewer edges
+ // than the input.
+ //
+ // Inlining a wrapper of in-degree m, out-degree n adds m*n
+ // and removes m+n edges. Since most wrappers are monomorphic
+ // (n=1) this results in a slight reduction. Polymorphic
+ // wrappers (n>1), e.g. from embedding an interface value
+ // inside a struct to satisfy some interface, cause an
+ // increase in the graph, but they seem to be uncommon.
+
+ // Hash all existing edges to avoid creating duplicates.
+ edges := make(map[Edge]bool)
+ for _, cgn := range g.Nodes {
+ for _, e := range cgn.Out {
+ edges[*e] = true
+ }
+ }
+ for fn, cgn := range g.Nodes {
+ if cgn == g.Root || fn.Synthetic == "" || isInit(cgn.Func) {
+ continue // keep
+ }
+ for _, eIn := range cgn.In {
+ for _, eOut := range cgn.Out {
+ newEdge := Edge{eIn.Caller, eIn.Site, eOut.Callee}
+ if edges[newEdge] {
+ continue // don't add duplicate
+ }
+ AddEdge(eIn.Caller, eIn.Site, eOut.Callee)
+ edges[newEdge] = true
+ }
+ }
+ g.DeleteNode(cgn)
+ }
+}
+
+func isInit(fn *ssa.Function) bool {
+ return fn.Pkg != nil && fn.Pkg.Func("init") == fn
+}
+
+// DeleteNode removes node n and its edges from the graph g.
+// (NB: not efficient for batch deletion.)
+func (g *Graph) DeleteNode(n *Node) {
+ n.deleteIns()
+ n.deleteOuts()
+ delete(g.Nodes, n.Func)
+}
+
+// deleteIns deletes all incoming edges to n.
+func (n *Node) deleteIns() {
+ for _, e := range n.In {
+ removeOutEdge(e)
+ }
+ n.In = nil
+}
+
+// deleteOuts deletes all outgoing edges from n.
+func (n *Node) deleteOuts() {
+ for _, e := range n.Out {
+ removeInEdge(e)
+ }
+ n.Out = nil
+}
+
+// removeOutEdge removes edge.Caller's outgoing edge 'edge'.
+func removeOutEdge(edge *Edge) {
+ caller := edge.Caller
+ n := len(caller.Out)
+ for i, e := range caller.Out {
+ if e == edge {
+ // Replace it with the final element and shrink the slice.
+ caller.Out[i] = caller.Out[n-1]
+ caller.Out[n-1] = nil // aid GC
+ caller.Out = caller.Out[:n-1]
+ return
+ }
+ }
+ panic("edge not found: " + edge.String())
+}
+
+// removeInEdge removes edge.Callee's incoming edge 'edge'.
+func removeInEdge(edge *Edge) {
+ caller := edge.Callee
+ n := len(caller.In)
+ for i, e := range caller.In {
+ if e == edge {
+ // Replace it with the final element and shrink the slice.
+ caller.In[i] = caller.In[n-1]
+ caller.In[n-1] = nil // aid GC
+ caller.In = caller.In[:n-1]
+ return
+ }
+ }
+ panic("edge not found: " + edge.String())
+}
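A sketch combining the helpers above (hypothetical caller; the graph is assumed to come from an analysis such as rta.Analyze): strip synthetic wrapper nodes so paths read naturally, then print one call path from the root to a target function.

package example

import (
    "fmt"

    "llvm.org/llgo/third_party/go.tools/go/callgraph"
    "llvm.org/llgo/third_party/go.tools/go/ssa"
)

func explainReachability(g *callgraph.Graph, target *ssa.Function) {
    g.DeleteSyntheticNodes() // "inline" wrappers, preserving topology

    path := callgraph.PathSearch(g.Root, func(n *callgraph.Node) bool {
        return n.Func == target
    })
    if path == nil {
        fmt.Println("no path found")
        return
    }
    for _, e := range path {
        fmt.Println(e) // one "caller --> callee" edge per line
    }
}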
diff --git a/llgo/third_party/go.tools/go/exact/exact.go b/llgo/third_party/go.tools/go/exact/exact.go
new file mode 100644
index 0000000000000000000000000000000000000000..06d591888a205ff43b3706260b319769d8935c03
--- /dev/null
+++ b/llgo/third_party/go.tools/go/exact/exact.go
@@ -0,0 +1,918 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package exact implements Values representing untyped
+// Go constants and the corresponding operations. Values
+// and operations have unlimited precision.
+//
+// A special Unknown value may be used when a value
+// is unknown due to an error. Operations on unknown
+// values produce unknown values unless specified
+// otherwise.
+//
+package exact
+
+import (
+ "fmt"
+ "go/token"
+ "math/big"
+ "strconv"
+)
+
+// Kind specifies the kind of value represented by a Value.
+type Kind int
+
+// Implementation note: Kinds must be enumerated in
+// order of increasing "complexity" (used by match).
+
+const (
+ // unknown values
+ Unknown Kind = iota
+
+ // non-numeric values
+ Bool
+ String
+
+ // numeric values
+ Int
+ Float
+ Complex
+)
+
+// A Value represents a mathematically exact value of a given Kind.
+type Value interface {
+ // Kind returns the value kind; it is always the smallest
+ // kind in which the value can be represented exactly.
+ Kind() Kind
+
+ // String returns a human-readable form of the value.
+ String() string
+
+ // Prevent external implementations.
+ implementsValue()
+}
+
+// ----------------------------------------------------------------------------
+// Implementations
+
+type (
+ unknownVal struct{}
+ boolVal bool
+ stringVal string
+ int64Val int64
+ intVal struct{ val *big.Int }
+ floatVal struct{ val *big.Rat }
+ complexVal struct{ re, im *big.Rat }
+)
+
+func (unknownVal) Kind() Kind { return Unknown }
+func (boolVal) Kind() Kind { return Bool }
+func (stringVal) Kind() Kind { return String }
+func (int64Val) Kind() Kind { return Int }
+func (intVal) Kind() Kind { return Int }
+func (floatVal) Kind() Kind { return Float }
+func (complexVal) Kind() Kind { return Complex }
+
+func (unknownVal) String() string { return "unknown" }
+func (x boolVal) String() string { return fmt.Sprintf("%v", bool(x)) }
+func (x stringVal) String() string { return strconv.Quote(string(x)) }
+func (x int64Val) String() string { return strconv.FormatInt(int64(x), 10) }
+func (x intVal) String() string { return x.val.String() }
+func (x floatVal) String() string { return x.val.String() }
+func (x complexVal) String() string { return fmt.Sprintf("(%s + %si)", x.re, x.im) }
+
+func (unknownVal) implementsValue() {}
+func (boolVal) implementsValue() {}
+func (stringVal) implementsValue() {}
+func (int64Val) implementsValue() {}
+func (intVal) implementsValue() {}
+func (floatVal) implementsValue() {}
+func (complexVal) implementsValue() {}
+
+// int64 bounds
+var (
+ minInt64 = big.NewInt(-1 << 63)
+ maxInt64 = big.NewInt(1<<63 - 1)
+)
+
+func normInt(x *big.Int) Value {
+ if minInt64.Cmp(x) <= 0 && x.Cmp(maxInt64) <= 0 {
+ return int64Val(x.Int64())
+ }
+ return intVal{x}
+}
+
+func normFloat(x *big.Rat) Value {
+ if x.IsInt() {
+ return normInt(x.Num())
+ }
+ return floatVal{x}
+}
+
+func normComplex(re, im *big.Rat) Value {
+ if im.Sign() == 0 {
+ return normFloat(re)
+ }
+ return complexVal{re, im}
+}
+
+// ----------------------------------------------------------------------------
+// Factories
+
+// MakeUnknown returns the Unknown value.
+func MakeUnknown() Value { return unknownVal{} }
+
+// MakeBool returns the Bool value for x.
+func MakeBool(b bool) Value { return boolVal(b) }
+
+// MakeString returns the String value for x.
+func MakeString(s string) Value { return stringVal(s) }
+
+// MakeInt64 returns the Int value for x.
+func MakeInt64(x int64) Value { return int64Val(x) }
+
+// MakeUint64 returns the Int value for x.
+func MakeUint64(x uint64) Value { return normInt(new(big.Int).SetUint64(x)) }
+
+// MakeFloat64 returns the numeric value for x.
+// If x is not finite, the result is unknown.
+func MakeFloat64(x float64) Value {
+ if f := new(big.Rat).SetFloat64(x); f != nil {
+ return normFloat(f)
+ }
+ return unknownVal{}
+}
+
+// MakeFromLiteral returns the corresponding integer, floating-point,
+// imaginary, character, or string value for a Go literal string. The
+// result is nil if the literal string is invalid.
+func MakeFromLiteral(lit string, tok token.Token) Value {
+ switch tok {
+ case token.INT:
+ if x, err := strconv.ParseInt(lit, 0, 64); err == nil {
+ return int64Val(x)
+ }
+ if x, ok := new(big.Int).SetString(lit, 0); ok {
+ return intVal{x}
+ }
+
+ case token.FLOAT:
+ if x, ok := new(big.Rat).SetString(lit); ok {
+ return normFloat(x)
+ }
+
+ case token.IMAG:
+ if n := len(lit); n > 0 && lit[n-1] == 'i' {
+ if im, ok := new(big.Rat).SetString(lit[0 : n-1]); ok {
+ return normComplex(big.NewRat(0, 1), im)
+ }
+ }
+
+ case token.CHAR:
+ if n := len(lit); n >= 2 {
+ if code, _, _, err := strconv.UnquoteChar(lit[1:n-1], '\''); err == nil {
+ return int64Val(code)
+ }
+ }
+
+ case token.STRING:
+ if s, err := strconv.Unquote(lit); err == nil {
+ return stringVal(s)
+ }
+ }
+
+ return nil
+}
+
+// ----------------------------------------------------------------------------
+// Accessors
+//
+// For unknown arguments the result is the zero value for the respective
+// accessor type, except for Sign, where the result is 1.
+
+// BoolVal returns the Go boolean value of x, which must be a Bool or an Unknown.
+// If x is Unknown, the result is false.
+func BoolVal(x Value) bool {
+ switch x := x.(type) {
+ case boolVal:
+ return bool(x)
+ case unknownVal:
+ return false
+ }
+ panic(fmt.Sprintf("%v not a Bool", x))
+}
+
+// StringVal returns the Go string value of x, which must be a String or an Unknown.
+// If x is Unknown, the result is "".
+func StringVal(x Value) string {
+ switch x := x.(type) {
+ case stringVal:
+ return string(x)
+ case unknownVal:
+ return ""
+ }
+ panic(fmt.Sprintf("%v not a String", x))
+}
+
+// Int64Val returns the Go int64 value of x and whether the result is exact;
+// x must be an Int or an Unknown. If the result is not exact, its value is undefined.
+// If x is Unknown, the result is (0, false).
+func Int64Val(x Value) (int64, bool) {
+ switch x := x.(type) {
+ case int64Val:
+ return int64(x), true
+ case intVal:
+ return x.val.Int64(), x.val.BitLen() <= 63
+ case unknownVal:
+ return 0, false
+ }
+ panic(fmt.Sprintf("%v not an Int", x))
+}
+
+// Uint64Val returns the Go uint64 value of x and whether the result is exact;
+// x must be an Int or an Unknown. If the result is not exact, its value is undefined.
+// If x is Unknown, the result is (0, false).
+func Uint64Val(x Value) (uint64, bool) {
+ switch x := x.(type) {
+ case int64Val:
+ return uint64(x), x >= 0
+ case intVal:
+ return x.val.Uint64(), x.val.Sign() >= 0 && x.val.BitLen() <= 64
+ case unknownVal:
+ return 0, false
+ }
+ panic(fmt.Sprintf("%v not an Int", x))
+}
+
+// Float32Val is like Float64Val but for float32 instead of float64.
+func Float32Val(x Value) (float32, bool) {
+ switch x := x.(type) {
+ case int64Val:
+ f := float32(x)
+ return f, int64Val(f) == x
+ case intVal:
+ return ratToFloat32(new(big.Rat).SetFrac(x.val, int1))
+ case floatVal:
+ return ratToFloat32(x.val)
+ case unknownVal:
+ return 0, false
+ }
+ panic(fmt.Sprintf("%v not a Float", x))
+}
+
+// Float64Val returns the nearest Go float64 value of x and whether the result is exact;
+// x must be numeric but not Complex, or Unknown. For values too small (too close to 0)
+// to represent as float64, Float64Val silently underflows to 0. The result sign always
+// matches the sign of x, even for 0.
+// If x is Unknown, the result is (0, false).
+func Float64Val(x Value) (float64, bool) {
+ switch x := x.(type) {
+ case int64Val:
+ f := float64(int64(x))
+ return f, int64Val(f) == x
+ case intVal:
+ return new(big.Rat).SetFrac(x.val, int1).Float64()
+ case floatVal:
+ return x.val.Float64()
+ case unknownVal:
+ return 0, false
+ }
+ panic(fmt.Sprintf("%v not a Float", x))
+}
+
+// BitLen returns the number of bits required to represent
+// the absolute value x in binary representation; x must be an Int or an Unknown.
+// If x is Unknown, the result is 0.
+func BitLen(x Value) int {
+ switch x := x.(type) {
+ case int64Val:
+ return new(big.Int).SetInt64(int64(x)).BitLen()
+ case intVal:
+ return x.val.BitLen()
+ case unknownVal:
+ return 0
+ }
+ panic(fmt.Sprintf("%v not an Int", x))
+}
+
+// Sign returns -1, 0, or 1 depending on whether x < 0, x == 0, or x > 0;
+// x must be numeric or Unknown. For complex values x, the sign is 0 if x == 0,
+// otherwise it is != 0. If x is Unknown, the result is 1.
+func Sign(x Value) int {
+ switch x := x.(type) {
+ case int64Val:
+ switch {
+ case x < 0:
+ return -1
+ case x > 0:
+ return 1
+ }
+ return 0
+ case intVal:
+ return x.val.Sign()
+ case floatVal:
+ return x.val.Sign()
+ case complexVal:
+ return x.re.Sign() | x.im.Sign()
+ case unknownVal:
+ return 1 // avoid spurious division by zero errors
+ }
+ panic(fmt.Sprintf("%v not numeric", x))
+}
+
+// ----------------------------------------------------------------------------
+// Support for serializing/deserializing integers
+
+const (
+ // Compute the size of a Word in bytes.
+ _m = ^big.Word(0)
+ _log = _m>>8&1 + _m>>16&1 + _m>>32&1
+ wordSize = 1 << _log
+)
+
+// Bytes returns the bytes for the absolute value of x in little-
+// endian binary representation; x must be an Int.
+func Bytes(x Value) []byte {
+ var val *big.Int
+ switch x := x.(type) {
+ case int64Val:
+ val = new(big.Int).SetInt64(int64(x))
+ case intVal:
+ val = x.val
+ default:
+ panic(fmt.Sprintf("%v not an Int", x))
+ }
+
+ words := val.Bits()
+ bytes := make([]byte, len(words)*wordSize)
+
+ i := 0
+ for _, w := range words {
+ for j := 0; j < wordSize; j++ {
+ bytes[i] = byte(w)
+ w >>= 8
+ i++
+ }
+ }
+ // remove leading 0's
+ for i > 0 && bytes[i-1] == 0 {
+ i--
+ }
+
+ return bytes[:i]
+}
+
+// MakeFromBytes returns the Int value given the bytes of its little-endian
+// binary representation. An empty byte slice argument represents 0.
+func MakeFromBytes(bytes []byte) Value {
+ words := make([]big.Word, (len(bytes)+(wordSize-1))/wordSize)
+
+ i := 0
+ var w big.Word
+ var s uint
+ for _, b := range bytes {
+ w |= big.Word(b) << s
+ if s += 8; s == wordSize*8 {
+ words[i] = w
+ i++
+ w = 0
+ s = 0
+ }
+ }
+ // store last word
+ if i < len(words) {
+ words[i] = w
+ i++
+ }
+ // remove leading 0's
+ for i > 0 && words[i-1] == 0 {
+ i--
+ }
+
+ return normInt(new(big.Int).SetBits(words[:i]))
+}
+
+// ----------------------------------------------------------------------------
+// Support for disassembling fractions
+
+// Num returns the numerator of x; x must be Int, Float, or Unknown.
+// If x is Unknown, the result is Unknown, otherwise it is an Int
+// with the same sign as x.
+func Num(x Value) Value {
+ switch x := x.(type) {
+ case unknownVal, int64Val, intVal:
+ return x
+ case floatVal:
+ return normInt(x.val.Num())
+ }
+ panic(fmt.Sprintf("%v not Int or Float", x))
+}
+
+// Denom returns the denominator of x; x must be Int, Float, or Unknown.
+// If x is Unknown, the result is Unknown, otherwise it is an Int >= 1.
+func Denom(x Value) Value {
+ switch x := x.(type) {
+ case unknownVal:
+ return x
+ case int64Val, intVal:
+ return int64Val(1)
+ case floatVal:
+ return normInt(x.val.Denom())
+ }
+ panic(fmt.Sprintf("%v not Int or Float", x))
+}
+
+// ----------------------------------------------------------------------------
+// Support for assembling/disassembling complex numbers
+
+// MakeImag returns the numeric value x*i (possibly 0);
+// x must be Int, Float, or Unknown.
+// If x is Unknown, the result is Unknown.
+func MakeImag(x Value) Value {
+ var im *big.Rat
+ switch x := x.(type) {
+ case unknownVal:
+ return x
+ case int64Val:
+ im = big.NewRat(int64(x), 1)
+ case intVal:
+ im = new(big.Rat).SetFrac(x.val, int1)
+ case floatVal:
+ im = x.val
+ default:
+ panic(fmt.Sprintf("%v not Int or Float", x))
+ }
+ return normComplex(rat0, im)
+}
+
+// Real returns the real part of x, which must be a numeric or unknown value.
+// If x is Unknown, the result is Unknown.
+func Real(x Value) Value {
+ switch x := x.(type) {
+ case unknownVal, int64Val, intVal, floatVal:
+ return x
+ case complexVal:
+ return normFloat(x.re)
+ }
+ panic(fmt.Sprintf("%v not numeric", x))
+}
+
+// Imag returns the imaginary part of x, which must be a numeric or unknown value.
+// If x is Unknown, the result is Unknown.
+func Imag(x Value) Value {
+ switch x := x.(type) {
+ case unknownVal:
+ return x
+ case int64Val, intVal, floatVal:
+ return int64Val(0)
+ case complexVal:
+ return normFloat(x.im)
+ }
+ panic(fmt.Sprintf("%v not numeric", x))
+}
+
+// ----------------------------------------------------------------------------
+// Operations
+
+// is32bit reports whether x can be represented using 32 bits.
+func is32bit(x int64) bool {
+ const s = 32
+ return -1<<(s-1) <= x && x <= 1<<(s-1)-1
+}
+
+// is63bit reports whether x can be represented using 63 bits.
+func is63bit(x int64) bool {
+ const s = 63
+ return -1<<(s-1) <= x && x <= 1<<(s-1)-1
+}
+
+// UnaryOp returns the result of the unary expression op y.
+// The operation must be defined for the operand.
+// If size >= 0 it specifies the ^ (xor) result size in bytes.
+// If y is Unknown, the result is Unknown.
+//
+func UnaryOp(op token.Token, y Value, size int) Value {
+ switch op {
+ case token.ADD:
+ switch y.(type) {
+ case unknownVal, int64Val, intVal, floatVal, complexVal:
+ return y
+ }
+
+ case token.SUB:
+ switch y := y.(type) {
+ case unknownVal:
+ return y
+ case int64Val:
+ if z := -y; z != y {
+ return z // no overflow
+ }
+ return normInt(new(big.Int).Neg(big.NewInt(int64(y))))
+ case intVal:
+ return normInt(new(big.Int).Neg(y.val))
+ case floatVal:
+ return normFloat(new(big.Rat).Neg(y.val))
+ case complexVal:
+ return normComplex(new(big.Rat).Neg(y.re), new(big.Rat).Neg(y.im))
+ }
+
+ case token.XOR:
+ var z big.Int
+ switch y := y.(type) {
+ case unknownVal:
+ return y
+ case int64Val:
+ z.Not(big.NewInt(int64(y)))
+ case intVal:
+ z.Not(y.val)
+ default:
+ goto Error
+ }
+ // For unsigned types, the result will be negative and
+ // thus "too large": We must limit the result size to
+ // the type's size.
+ if size >= 0 {
+ s := uint(size) * 8
+ z.AndNot(&z, new(big.Int).Lsh(big.NewInt(-1), s)) // z &^= (-1)<<s
+ }
+ return normInt(&z)
+
+ case token.NOT:
+ switch y := y.(type) {
+ case unknownVal:
+ return y
+ case boolVal:
+ return !y
+ }
+ }
+
+Error:
+ panic(fmt.Sprintf("invalid unary operation %s%v", op, y))
+}
+
+var (
+ int1 = big.NewInt(1)
+ rat0 = big.NewRat(0, 1)
+)
+
+func ord(x Value) int {
+ switch x.(type) {
+ case unknownVal:
+ return 0
+ case boolVal, stringVal:
+ return 1
+ case int64Val:
+ return 2
+ case intVal:
+ return 3
+ case floatVal:
+ return 4
+ case complexVal:
+ return 5
+ }
+ panic("unreachable")
+}
+
+// match returns the matching representation (same type) with the
+// smallest complexity for two values x and y. If one of them is
+// numeric, both of them must be numeric. If one of them is Unknown,
+// both results are Unknown.
+//
+func match(x, y Value) (_, _ Value) {
+ if ord(x) > ord(y) {
+ y, x = match(y, x)
+ return x, y
+ }
+ // ord(x) <= ord(y)
+
+ switch x := x.(type) {
+ case unknownVal:
+ return x, x
+
+ case boolVal, stringVal, complexVal:
+ return x, y
+
+ case int64Val:
+ switch y := y.(type) {
+ case int64Val:
+ return x, y
+ case intVal:
+ return intVal{big.NewInt(int64(x))}, y
+ case floatVal:
+ return floatVal{big.NewRat(int64(x), 1)}, y
+ case complexVal:
+ return complexVal{big.NewRat(int64(x), 1), rat0}, y
+ }
+
+ case intVal:
+ switch y := y.(type) {
+ case intVal:
+ return x, y
+ case floatVal:
+ return floatVal{new(big.Rat).SetFrac(x.val, int1)}, y
+ case complexVal:
+ return complexVal{new(big.Rat).SetFrac(x.val, int1), rat0}, y
+ }
+
+ case floatVal:
+ switch y := y.(type) {
+ case floatVal:
+ return x, y
+ case complexVal:
+ return complexVal{x.val, rat0}, y
+ }
+ }
+
+ panic("unreachable")
+}
+
+// BinaryOp returns the result of the binary expression x op y.
+// The operation must be defined for the operands. If one of the
+// operands is Unknown, the result is Unknown.
+// To force integer division of Int operands, use op == token.QUO_ASSIGN
+// instead of token.QUO; the result is guaranteed to be Int in this case.
+// Division by zero leads to a run-time panic.
+//
+func BinaryOp(x Value, op token.Token, y Value) Value {
+ x, y = match(x, y)
+
+ switch x := x.(type) {
+ case unknownVal:
+ return x
+
+ case boolVal:
+ y := y.(boolVal)
+ switch op {
+ case token.LAND:
+ return x && y
+ case token.LOR:
+ return x || y
+ }
+
+ case int64Val:
+ a := int64(x)
+ b := int64(y.(int64Val))
+ var c int64
+ switch op {
+ case token.ADD:
+ if !is63bit(a) || !is63bit(b) {
+ return normInt(new(big.Int).Add(big.NewInt(a), big.NewInt(b)))
+ }
+ c = a + b
+ case token.SUB:
+ if !is63bit(a) || !is63bit(b) {
+ return normInt(new(big.Int).Sub(big.NewInt(a), big.NewInt(b)))
+ }
+ c = a - b
+ case token.MUL:
+ if !is32bit(a) || !is32bit(b) {
+ return normInt(new(big.Int).Mul(big.NewInt(a), big.NewInt(b)))
+ }
+ c = a * b
+ case token.QUO:
+ return normFloat(new(big.Rat).SetFrac(big.NewInt(a), big.NewInt(b)))
+ case token.QUO_ASSIGN: // force integer division
+ c = a / b
+ case token.REM:
+ c = a % b
+ case token.AND:
+ c = a & b
+ case token.OR:
+ c = a | b
+ case token.XOR:
+ c = a ^ b
+ case token.AND_NOT:
+ c = a &^ b
+ default:
+ goto Error
+ }
+ return int64Val(c)
+
+ case intVal:
+ a := x.val
+ b := y.(intVal).val
+ var c big.Int
+ switch op {
+ case token.ADD:
+ c.Add(a, b)
+ case token.SUB:
+ c.Sub(a, b)
+ case token.MUL:
+ c.Mul(a, b)
+ case token.QUO:
+ return normFloat(new(big.Rat).SetFrac(a, b))
+ case token.QUO_ASSIGN: // force integer division
+ c.Quo(a, b)
+ case token.REM:
+ c.Rem(a, b)
+ case token.AND:
+ c.And(a, b)
+ case token.OR:
+ c.Or(a, b)
+ case token.XOR:
+ c.Xor(a, b)
+ case token.AND_NOT:
+ c.AndNot(a, b)
+ default:
+ goto Error
+ }
+ return normInt(&c)
+
+ case floatVal:
+ a := x.val
+ b := y.(floatVal).val
+ var c big.Rat
+ switch op {
+ case token.ADD:
+ c.Add(a, b)
+ case token.SUB:
+ c.Sub(a, b)
+ case token.MUL:
+ c.Mul(a, b)
+ case token.QUO:
+ c.Quo(a, b)
+ default:
+ goto Error
+ }
+ return normFloat(&c)
+
+ case complexVal:
+ y := y.(complexVal)
+ a, b := x.re, x.im
+ c, d := y.re, y.im
+ var re, im big.Rat
+ switch op {
+ case token.ADD:
+ // (a+c) + i(b+d)
+ re.Add(a, c)
+ im.Add(b, d)
+ case token.SUB:
+ // (a-c) + i(b-d)
+ re.Sub(a, c)
+ im.Sub(b, d)
+ case token.MUL:
+ // (ac-bd) + i(bc+ad)
+ var ac, bd, bc, ad big.Rat
+ ac.Mul(a, c)
+ bd.Mul(b, d)
+ bc.Mul(b, c)
+ ad.Mul(a, d)
+ re.Sub(&ac, &bd)
+ im.Add(&bc, &ad)
+ case token.QUO:
+ // (ac+bd)/s + i(bc-ad)/s, with s = cc + dd
+ var ac, bd, bc, ad, s, cc, dd big.Rat
+ ac.Mul(a, c)
+ bd.Mul(b, d)
+ bc.Mul(b, c)
+ ad.Mul(a, d)
+ cc.Mul(c, c)
+ dd.Mul(d, d)
+ s.Add(&cc, &dd)
+ re.Add(&ac, &bd)
+ re.Quo(&re, &s)
+ im.Sub(&bc, &ad)
+ im.Quo(&im, &s)
+ default:
+ goto Error
+ }
+ return normComplex(&re, &im)
+
+ case stringVal:
+ if op == token.ADD {
+ return x + y.(stringVal)
+ }
+ }
+
+Error:
+ panic(fmt.Sprintf("invalid binary operation %v %s %v", x, op, y))
+}
+
+// Shift returns the result of the shift expression x op s
+// with op == token.SHL or token.SHR (<< or >>). x must be
+// an Int or an Unknown. If x is Unknown, the result is x.
+//
+func Shift(x Value, op token.Token, s uint) Value {
+ switch x := x.(type) {
+ case unknownVal:
+ return x
+
+ case int64Val:
+ if s == 0 {
+ return x
+ }
+ switch op {
+ case token.SHL:
+ z := big.NewInt(int64(x))
+ return normInt(z.Lsh(z, s))
+ case token.SHR:
+ return x >> s
+ }
+
+ case intVal:
+ if s == 0 {
+ return x
+ }
+ var z big.Int
+ switch op {
+ case token.SHL:
+ return normInt(z.Lsh(x.val, s))
+ case token.SHR:
+ return normInt(z.Rsh(x.val, s))
+ }
+ }
+
+ panic(fmt.Sprintf("invalid shift %v %s %d", x, op, s))
+}
+
+func cmpZero(x int, op token.Token) bool {
+ switch op {
+ case token.EQL:
+ return x == 0
+ case token.NEQ:
+ return x != 0
+ case token.LSS:
+ return x < 0
+ case token.LEQ:
+ return x <= 0
+ case token.GTR:
+ return x > 0
+ case token.GEQ:
+ return x >= 0
+ }
+ panic("unreachable")
+}
+
+// Compare returns the result of the comparison x op y.
+// The comparison must be defined for the operands.
+// If one of the operands is Unknown, the result is
+// false.
+//
+func Compare(x Value, op token.Token, y Value) bool {
+ x, y = match(x, y)
+
+ switch x := x.(type) {
+ case unknownVal:
+ return false
+
+ case boolVal:
+ y := y.(boolVal)
+ switch op {
+ case token.EQL:
+ return x == y
+ case token.NEQ:
+ return x != y
+ }
+
+ case int64Val:
+ y := y.(int64Val)
+ switch op {
+ case token.EQL:
+ return x == y
+ case token.NEQ:
+ return x != y
+ case token.LSS:
+ return x < y
+ case token.LEQ:
+ return x <= y
+ case token.GTR:
+ return x > y
+ case token.GEQ:
+ return x >= y
+ }
+
+ case intVal:
+ return cmpZero(x.val.Cmp(y.(intVal).val), op)
+
+ case floatVal:
+ return cmpZero(x.val.Cmp(y.(floatVal).val), op)
+
+ case complexVal:
+ y := y.(complexVal)
+ re := x.re.Cmp(y.re)
+ im := x.im.Cmp(y.im)
+ switch op {
+ case token.EQL:
+ return re == 0 && im == 0
+ case token.NEQ:
+ return re != 0 || im != 0
+ }
+
+ case stringVal:
+ y := y.(stringVal)
+ switch op {
+ case token.EQL:
+ return x == y
+ case token.NEQ:
+ return x != y
+ case token.LSS:
+ return x < y
+ case token.LEQ:
+ return x <= y
+ case token.GTR:
+ return x > y
+ case token.GEQ:
+ return x >= y
+ }
+ }
+
+ panic(fmt.Sprintf("invalid comparison %v %s %v", x, op, y))
+}
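
A minimal usage sketch of the exact API defined above. The import path follows the third_party layout used elsewhere in this patch; the program itself is illustrative and not part of the change.

    package main

    import (
    	"fmt"
    	"go/token"

    	"llvm.org/llgo/third_party/go.tools/go/exact"
    )

    func main() {
    	// 2^70 does not fit in an int64; the package keeps it exact.
    	x := exact.MakeFromLiteral("1180591620717411303424", token.INT)
    	y := exact.MakeInt64(42)

    	sum := exact.BinaryOp(x, token.ADD, y)
    	fmt.Println(sum, exact.BitLen(sum)) // 1180591620717411303466 71

    	fmt.Println(exact.Compare(x, token.GTR, y)) // true

    	// QUO yields an exact rational; QUO_ASSIGN forces integer division.
    	fmt.Println(exact.BinaryOp(exact.MakeInt64(5), token.QUO, exact.MakeInt64(3)))        // 5/3
    	fmt.Println(exact.BinaryOp(exact.MakeInt64(5), token.QUO_ASSIGN, exact.MakeInt64(3))) // 1
    }
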
diff --git a/llgo/third_party/go.tools/go/exact/exact_test.go b/llgo/third_party/go.tools/go/exact/exact_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..c51797080f43eea49a74b39975dc095225a37a35
--- /dev/null
+++ b/llgo/third_party/go.tools/go/exact/exact_test.go
@@ -0,0 +1,348 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package exact
+
+import (
+ "go/token"
+ "strings"
+ "testing"
+)
+
+// TODO(gri) expand this test framework
+
+var opTests = []string{
+ // unary operations
+ `+ 0 = 0`,
+ `+ ? = ?`,
+ `- 1 = -1`,
+ `- ? = ?`,
+ `^ 0 = -1`,
+ `^ ? = ?`,
+
+ `! true = false`,
+ `! false = true`,
+ `! ? = ?`,
+
+ // etc.
+
+ // binary operations
+ `"" + "" = ""`,
+ `"foo" + "" = "foo"`,
+ `"" + "bar" = "bar"`,
+ `"foo" + "bar" = "foobar"`,
+
+ `0 + 0 = 0`,
+ `0 + 0.1 = 0.1`,
+ `0 + 0.1i = 0.1i`,
+ `0.1 + 0.9 = 1`,
+ `1e100 + 1e100 = 2e100`,
+ `? + 0 = ?`,
+ `0 + ? = ?`,
+
+ `0 - 0 = 0`,
+ `0 - 0.1 = -0.1`,
+ `0 - 0.1i = -0.1i`,
+ `1e100 - 1e100 = 0`,
+ `? - 0 = ?`,
+ `0 - ? = ?`,
+
+ `0 * 0 = 0`,
+ `1 * 0.1 = 0.1`,
+ `1 * 0.1i = 0.1i`,
+ `1i * 1i = -1`,
+ `? * 0 = ?`,
+ `0 * ? = ?`,
+
+ `0 / 0 = "division_by_zero"`,
+ `10 / 2 = 5`,
+ `5 / 3 = 5/3`,
+ `5i / 3i = 5/3`,
+ `? / 0 = ?`,
+ `0 / ? = ?`,
+
+ `0 % 0 = "runtime_error:_integer_divide_by_zero"`, // TODO(gri) should be the same as for /
+ `10 % 3 = 1`,
+ `? % 0 = ?`,
+ `0 % ? = ?`,
+
+ `0 & 0 = 0`,
+ `12345 & 0 = 0`,
+ `0xff & 0xf = 0xf`,
+ `? & 0 = ?`,
+ `0 & ? = ?`,
+
+ `0 | 0 = 0`,
+ `12345 | 0 = 12345`,
+ `0xb | 0xa0 = 0xab`,
+ `? | 0 = ?`,
+ `0 | ? = ?`,
+
+ `0 ^ 0 = 0`,
+ `1 ^ -1 = -2`,
+ `? ^ 0 = ?`,
+ `0 ^ ? = ?`,
+
+ `0 &^ 0 = 0`,
+ `0xf &^ 1 = 0xe`,
+ `1 &^ 0xf = 0`,
+ // etc.
+
+ // shifts
+ `0 << 0 = 0`,
+ `1 << 10 = 1024`,
+ `0 >> 0 = 0`,
+ `1024 >> 10 == 1`,
+ `? << 0 == ?`,
+ `? >> 10 == ?`,
+ // etc.
+
+ // comparisons
+ `false == false = true`,
+ `false == true = false`,
+ `true == false = false`,
+ `true == true = true`,
+
+ `false != false = false`,
+ `false != true = true`,
+ `true != false = true`,
+ `true != true = false`,
+
+ `"foo" == "bar" = false`,
+ `"foo" != "bar" = true`,
+ `"foo" < "bar" = false`,
+ `"foo" <= "bar" = false`,
+ `"foo" > "bar" = true`,
+ `"foo" >= "bar" = true`,
+
+ `0 == 0 = true`,
+ `0 != 0 = false`,
+ `0 < 10 = true`,
+ `10 <= 10 = true`,
+ `0 > 10 = false`,
+ `10 >= 10 = true`,
+
+ `1/123456789 == 1/123456789 == true`,
+ `1/123456789 != 1/123456789 == false`,
+ `1/123456789 < 1/123456788 == true`,
+ `1/123456788 <= 1/123456789 == false`,
+ `0.11 > 0.11 = false`,
+ `0.11 >= 0.11 = true`,
+
+ `? == 0 = false`,
+ `? != 0 = false`,
+ `? < 10 = false`,
+ `? <= 10 = false`,
+ `? > 10 = false`,
+ `? >= 10 = false`,
+
+ `0 == ? = false`,
+ `0 != ? = false`,
+ `0 < ? = false`,
+ `10 <= ? = false`,
+ `0 > ? = false`,
+ `10 >= ? = false`,
+
+ // etc.
+}
+
+func TestOps(t *testing.T) {
+ for _, test := range opTests {
+ a := strings.Split(test, " ")
+ i := 0 // operator index
+
+ var x, x0 Value
+ switch len(a) {
+ case 4:
+ // unary operation
+ case 5:
+ // binary operation
+ x, x0 = val(a[0]), val(a[0])
+ i = 1
+ default:
+ t.Errorf("invalid test case: %s", test)
+ continue
+ }
+
+ op, ok := optab[a[i]]
+ if !ok {
+ panic("missing optab entry for " + a[i])
+ }
+
+ y, y0 := val(a[i+1]), val(a[i+1])
+
+ got := doOp(x, op, y)
+ want := val(a[i+3])
+ if !eql(got, want) {
+ t.Errorf("%s: got %s; want %s", test, got, want)
+ }
+ if x0 != nil && !eql(x, x0) {
+ t.Errorf("%s: x changed to %s", test, x)
+ }
+ if !eql(y, y0) {
+ t.Errorf("%s: y changed to %s", test, y)
+ }
+ }
+}
+
+func eql(x, y Value) bool {
+ _, ux := x.(unknownVal)
+ _, uy := y.(unknownVal)
+ if ux || uy {
+ return ux == uy
+ }
+ return Compare(x, token.EQL, y)
+}
+
+// ----------------------------------------------------------------------------
+// Support functions
+
+func val(lit string) Value {
+ if len(lit) == 0 {
+ return MakeUnknown()
+ }
+
+ switch lit {
+ case "?":
+ return MakeUnknown()
+ case "true":
+ return MakeBool(true)
+ case "false":
+ return MakeBool(false)
+ }
+
+ tok := token.INT
+ switch first, last := lit[0], lit[len(lit)-1]; {
+ case first == '"' || first == '`':
+ tok = token.STRING
+ lit = strings.Replace(lit, "_", " ", -1)
+ case first == '\'':
+ tok = token.CHAR
+ case last == 'i':
+ tok = token.IMAG
+ default:
+ if !strings.HasPrefix(lit, "0x") && strings.ContainsAny(lit, "./Ee") {
+ tok = token.FLOAT
+ }
+ }
+
+ return MakeFromLiteral(lit, tok)
+}
+
+var optab = map[string]token.Token{
+ "!": token.NOT,
+
+ "+": token.ADD,
+ "-": token.SUB,
+ "*": token.MUL,
+ "/": token.QUO,
+ "%": token.REM,
+
+ "<<": token.SHL,
+ ">>": token.SHR,
+
+ "&": token.AND,
+ "|": token.OR,
+ "^": token.XOR,
+ "&^": token.AND_NOT,
+
+ "==": token.EQL,
+ "!=": token.NEQ,
+ "<": token.LSS,
+ "<=": token.LEQ,
+ ">": token.GTR,
+ ">=": token.GEQ,
+}
+
+func panicHandler(v *Value) {
+ switch p := recover().(type) {
+ case nil:
+ // nothing to do
+ case string:
+ *v = MakeString(p)
+ case error:
+ *v = MakeString(p.Error())
+ default:
+ panic(p)
+ }
+}
+
+func doOp(x Value, op token.Token, y Value) (z Value) {
+ defer panicHandler(&z)
+
+ if x == nil {
+ return UnaryOp(op, y, -1)
+ }
+
+ switch op {
+ case token.EQL, token.NEQ, token.LSS, token.LEQ, token.GTR, token.GEQ:
+ return MakeBool(Compare(x, op, y))
+ case token.SHL, token.SHR:
+ s, _ := Int64Val(y)
+ return Shift(x, op, uint(s))
+ default:
+ return BinaryOp(x, op, y)
+ }
+}
+
+// ----------------------------------------------------------------------------
+// Other tests
+
+var fracTests = []string{
+ "0 0 1",
+ "1 1 1",
+ "-1 -1 1",
+ "1.2 6 5",
+ "-0.991 -991 1000",
+ "1e100 1e100 1",
+}
+
+func TestFractions(t *testing.T) {
+ for _, test := range fracTests {
+ a := strings.Split(test, " ")
+ if len(a) != 3 {
+ t.Errorf("invalid test case: %s", test)
+ continue
+ }
+
+ x := val(a[0])
+ n := val(a[1])
+ d := val(a[2])
+
+ if got := Num(x); !eql(got, n) {
+ t.Errorf("%s: got num = %s; want %s", test, got, n)
+ }
+
+ if got := Denom(x); !eql(got, d) {
+ t.Errorf("%s: got denom = %s; want %s", test, got, d)
+ }
+ }
+}
+
+var bytesTests = []string{
+ "0",
+ "1",
+ "123456789",
+ "123456789012345678901234567890123456789012345678901234567890",
+}
+
+func TestBytes(t *testing.T) {
+ for _, test := range bytesTests {
+ x := val(test)
+ bytes := Bytes(x)
+
+ // special case 0
+ if Sign(x) == 0 && len(bytes) != 0 {
+ t.Errorf("%s: got %v; want empty byte slice", test, bytes)
+ }
+
+ if n := len(bytes); n > 0 && bytes[n-1] == 0 {
+ t.Errorf("%s: got %v; want no leading 0 byte", test, bytes)
+ }
+
+ if got := MakeFromBytes(bytes); !eql(got, x) {
+ t.Errorf("%s: got %s; want %s (bytes = %v)", test, got, x, bytes)
+ }
+ }
+}
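
As a small companion to TestBytes, a hedged sketch of the Bytes/MakeFromBytes round trip described in the serialization section of exact.go (illustrative only, same assumed import path as above):

    package main

    import (
    	"fmt"
    	"go/token"

    	"llvm.org/llgo/third_party/go.tools/go/exact"
    )

    func main() {
    	x := exact.MakeFromLiteral("123456789012345678901234567890", token.INT)

    	// Bytes returns the absolute value in little-endian order with the
    	// high-order zero bytes stripped; MakeFromBytes inverts it.
    	b := exact.Bytes(x)
    	y := exact.MakeFromBytes(b)

    	fmt.Println(len(b))                         // 13 significant bytes for this value
    	fmt.Println(exact.Compare(x, token.EQL, y)) // true
    }
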
diff --git a/llgo/third_party/go.tools/go/exact/go13.go b/llgo/third_party/go.tools/go/exact/go13.go
new file mode 100644
index 0000000000000000000000000000000000000000..b330a10c72f29357135b4c5c279a9eb770df1ad4
--- /dev/null
+++ b/llgo/third_party/go.tools/go/exact/go13.go
@@ -0,0 +1,23 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+
+package exact
+
+import (
+ "math"
+ "math/big"
+)
+
+func ratToFloat32(x *big.Rat) (float32, bool) {
+ // Before 1.4, there's no Rat.Float32.
+ // Emulate it, albeit at the cost of
+ // imprecision in corner cases.
+ x64, exact := x.Float64()
+ x32 := float32(x64)
+ if math.IsInf(float64(x32), 0) {
+ exact = false
+ }
+ return x32, exact
+}
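
go13.go emulates Rat.Float32 for pre-1.4 toolchains. For reference, a Go 1.4+ counterpart can delegate to math/big directly, since Rat.Float32 exists there with the same (float32, bool) shape. The sketch below assumes a paired go14.go file with the inverse build constraint (not shown in this hunk), which in turn requires go13.go to carry the matching "+build !go1.4" constraint.

    // +build go1.4

    package exact

    import "math/big"

    func ratToFloat32(x *big.Rat) (float32, bool) {
    	return x.Float32()
    }
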
diff --git a/llgo/third_party/go.tools/go/gccgoimporter/gccgoinstallation.go b/llgo/third_party/go.tools/go/gccgoimporter/gccgoinstallation.go
new file mode 100644
index 0000000000000000000000000000000000000000..68608e9101518d5c23e341818d124057de9238bc
--- /dev/null
+++ b/llgo/third_party/go.tools/go/gccgoimporter/gccgoinstallation.go
@@ -0,0 +1,95 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gccgoimporter
+
+import (
+ "bufio"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+
+ "llvm.org/llgo/third_party/go.tools/go/types"
+)
+
+// Information about a specific installation of gccgo.
+type GccgoInstallation struct {
+ // Version of gcc (e.g. 4.8.0).
+ GccVersion string
+
+ // Target triple (e.g. x86_64-unknown-linux-gnu).
+ TargetTriple string
+
+ // Built-in library paths used by this installation.
+ LibPaths []string
+}
+
+// Ask the driver at the given path for information for this GccgoInstallation.
+func (inst *GccgoInstallation) InitFromDriver(gccgoPath string) (err error) {
+ cmd := exec.Command(gccgoPath, "-###", "-S", "-x", "go", "-")
+ stderr, err := cmd.StderrPipe()
+ if err != nil {
+ return
+ }
+
+ err = cmd.Start()
+ if err != nil {
+ return
+ }
+
+ scanner := bufio.NewScanner(stderr)
+ for scanner.Scan() {
+ line := scanner.Text()
+ switch {
+ case strings.HasPrefix(line, "Target: "):
+ inst.TargetTriple = line[8:]
+
+ case line[0] == ' ':
+ args := strings.Fields(line)
+ for _, arg := range args[1:] {
+ if strings.HasPrefix(arg, "-L") {
+ inst.LibPaths = append(inst.LibPaths, arg[2:])
+ }
+ }
+ }
+ }
+
+ stdout, err := exec.Command(gccgoPath, "-dumpversion").Output()
+ if err != nil {
+ return
+ }
+ inst.GccVersion = strings.TrimSpace(string(stdout))
+
+ return
+}
+
+// Return the list of export search paths for this GccgoInstallation.
+func (inst *GccgoInstallation) SearchPaths() (paths []string) {
+ for _, lpath := range inst.LibPaths {
+ spath := filepath.Join(lpath, "go", inst.GccVersion)
+ fi, err := os.Stat(spath)
+ if err != nil || !fi.IsDir() {
+ continue
+ }
+ paths = append(paths, spath)
+
+ spath = filepath.Join(spath, inst.TargetTriple)
+ fi, err = os.Stat(spath)
+ if err != nil || !fi.IsDir() {
+ continue
+ }
+ paths = append(paths, spath)
+ }
+
+ paths = append(paths, inst.LibPaths...)
+
+ return
+}
+
+// Return an importer that searches incpaths followed by the gcc installation's
+// built-in search paths and the current directory.
+func (inst *GccgoInstallation) GetImporter(incpaths []string, initmap map[*types.Package]InitData) types.Importer {
+ return GetImporter(append(append(incpaths, inst.SearchPaths()...), "."), initmap)
+}
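
A minimal usage sketch of GccgoInstallation. It is illustrative only and assumes a gccgo driver on PATH, much like TestInstallationImporter in the next file.

    package main

    import (
    	"fmt"
    	"log"

    	"llvm.org/llgo/third_party/go.tools/go/gccgoimporter"
    	"llvm.org/llgo/third_party/go.tools/go/types"
    )

    func main() {
    	var inst gccgoimporter.GccgoInstallation
    	if err := inst.InitFromDriver("gccgo"); err != nil {
    		log.Fatal(err)
    	}
    	fmt.Println(inst.GccVersion, inst.TargetTriple)

    	imp := inst.GetImporter(nil, nil)
    	pkg, err := imp(make(map[string]*types.Package), "io")
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Println(pkg.Path(), pkg.Scope().Lookup("Reader"))
    }
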
diff --git a/llgo/third_party/go.tools/go/gccgoimporter/gccgoinstallation_test.go b/llgo/third_party/go.tools/go/gccgoimporter/gccgoinstallation_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..6d1a4be330b5909e6ef6f12e48275560c9dfe1f3
--- /dev/null
+++ b/llgo/third_party/go.tools/go/gccgoimporter/gccgoinstallation_test.go
@@ -0,0 +1,194 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gccgoimporter
+
+import (
+ "runtime"
+ "testing"
+
+ "llvm.org/llgo/third_party/go.tools/go/types"
+)
+
+var importablePackages = [...]string{
+ "archive/tar",
+ "archive/zip",
+ "bufio",
+ "bytes",
+ "compress/bzip2",
+ "compress/flate",
+ "compress/gzip",
+ "compress/lzw",
+ "compress/zlib",
+ "container/heap",
+ "container/list",
+ "container/ring",
+ "crypto/aes",
+ "crypto/cipher",
+ "crypto/des",
+ "crypto/dsa",
+ "crypto/ecdsa",
+ "crypto/elliptic",
+ "crypto",
+ "crypto/hmac",
+ "crypto/md5",
+ "crypto/rand",
+ "crypto/rc4",
+ "crypto/rsa",
+ "crypto/sha1",
+ "crypto/sha256",
+ "crypto/sha512",
+ "crypto/subtle",
+ "crypto/tls",
+ "crypto/x509",
+ "crypto/x509/pkix",
+ "database/sql/driver",
+ "database/sql",
+ "debug/dwarf",
+ "debug/elf",
+ "debug/gosym",
+ "debug/macho",
+ "debug/pe",
+ "encoding/ascii85",
+ "encoding/asn1",
+ "encoding/base32",
+ "encoding/base64",
+ "encoding/binary",
+ "encoding/csv",
+ "encoding/gob",
+ "encoding",
+ "encoding/hex",
+ "encoding/json",
+ "encoding/pem",
+ "encoding/xml",
+ "errors",
+ "exp/proxy",
+ "exp/terminal",
+ "expvar",
+ "flag",
+ "fmt",
+ "go/ast",
+ "go/build",
+ "go/doc",
+ "go/format",
+ "go/parser",
+ "go/printer",
+ "go/scanner",
+ "go/token",
+ "hash/adler32",
+ "hash/crc32",
+ "hash/crc64",
+ "hash/fnv",
+ "hash",
+ "html",
+ "html/template",
+ "image/color",
+ "image/color/palette",
+ "image/draw",
+ "image/gif",
+ "image",
+ "image/jpeg",
+ "image/png",
+ "index/suffixarray",
+ "io",
+ "io/ioutil",
+ "log",
+ "log/syslog",
+ "math/big",
+ "math/cmplx",
+ "math",
+ "math/rand",
+ "mime",
+ "mime/multipart",
+ "net",
+ "net/http/cgi",
+ "net/http/cookiejar",
+ "net/http/fcgi",
+ "net/http",
+ "net/http/httptest",
+ "net/http/httputil",
+ "net/http/pprof",
+ "net/mail",
+ "net/rpc",
+ "net/rpc/jsonrpc",
+ "net/smtp",
+ "net/textproto",
+ "net/url",
+ "old/regexp",
+ "old/template",
+ "os/exec",
+ "os",
+ "os/signal",
+ "os/user",
+ "path/filepath",
+ "path",
+ "reflect",
+ "regexp",
+ "regexp/syntax",
+ "runtime/debug",
+ "runtime",
+ "runtime/pprof",
+ "sort",
+ "strconv",
+ "strings",
+ "sync/atomic",
+ "sync",
+ "syscall",
+ "testing",
+ "testing/iotest",
+ "testing/quick",
+ "text/scanner",
+ "text/tabwriter",
+ "text/template",
+ "text/template/parse",
+ "time",
+ "unicode",
+ "unicode/utf16",
+ "unicode/utf8",
+}
+
+func TestInstallationImporter(t *testing.T) {
+ // This test relies on gccgo being around, which it most likely will be if we
+ // were compiled with gccgo.
+ if runtime.Compiler != "gccgo" {
+ t.Skip("This test needs gccgo")
+ return
+ }
+
+ var inst GccgoInstallation
+ err := inst.InitFromDriver("gccgo")
+ if err != nil {
+ t.Fatal(err)
+ }
+ imp := inst.GetImporter(nil, nil)
+
+ // Ensure we don't regress the number of packages we can parse. First import
+ // all packages into the same map and then each individually.
+ pkgMap := make(map[string]*types.Package)
+ for _, pkg := range importablePackages {
+ _, err = imp(pkgMap, pkg)
+ if err != nil {
+ t.Error(err)
+ }
+ }
+
+ for _, pkg := range importablePackages {
+ _, err = imp(make(map[string]*types.Package), pkg)
+ if err != nil {
+ t.Error(err)
+ }
+ }
+
+ // Test for certain specific entities in the imported data.
+ for _, test := range [...]importerTest{
+ {pkgpath: "io", name: "Reader", want: "type Reader interface{Read(p []uint8) (n int, err error)}"},
+ {pkgpath: "io", name: "ReadWriter", want: "type ReadWriter interface{Reader; Writer}"},
+ {pkgpath: "math", name: "Pi", want: "const Pi untyped float"},
+ {pkgpath: "math", name: "Sin", want: "func Sin(x float64) float64"},
+ {pkgpath: "sort", name: "Ints", want: "func Ints(a []int)"},
+ {pkgpath: "unsafe", name: "Pointer", want: "type Pointer unsafe.Pointer"},
+ } {
+ runImporterTest(t, imp, nil, &test)
+ }
+}
diff --git a/llgo/third_party/go.tools/go/gccgoimporter/importer.go b/llgo/third_party/go.tools/go/gccgoimporter/importer.go
new file mode 100644
index 0000000000000000000000000000000000000000..cf5bcf06006c78fc8ff3d285c2bc2fdcceaeb1ba
--- /dev/null
+++ b/llgo/third_party/go.tools/go/gccgoimporter/importer.go
@@ -0,0 +1,199 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package gccgoimporter implements Import for gccgo-generated object files.
+package gccgoimporter
+
+import (
+ "bytes"
+ "debug/elf"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+
+ "llvm.org/llgo/third_party/go.tools/go/importer"
+ "llvm.org/llgo/third_party/go.tools/go/types"
+)
+
+// A PackageInit describes an imported package that needs initialization.
+type PackageInit struct {
+ Name string // short package name
+ InitFunc string // name of init function
+ Priority int // priority of init function, see InitData.Priority
+}
+
+// The gccgo-specific init data for a package.
+type InitData struct {
+ // Initialization priority of this package relative to other packages.
+ // This is based on the maximum depth of the package's dependency graph;
+ // it is guaranteed to be greater than that of its dependencies.
+ Priority int
+
+ // The list of packages which this package depends on to be initialized,
+ // including itself if needed. This is the subset of the transitive closure of
+ // the package's dependencies that need initialization.
+ Inits []PackageInit
+}
+
+// Locate the file from which to read export data.
+// This is intended to replicate the logic in gofrontend.
+func findExportFile(searchpaths []string, pkgpath string) (string, error) {
+ for _, spath := range searchpaths {
+ pkgfullpath := filepath.Join(spath, pkgpath)
+ pkgdir, name := filepath.Split(pkgfullpath)
+
+ for _, filepath := range [...]string{
+ pkgfullpath,
+ pkgfullpath + ".gox",
+ pkgdir + "lib" + name + ".so",
+ pkgdir + "lib" + name + ".a",
+ pkgfullpath + ".o",
+ } {
+ fi, err := os.Stat(filepath)
+ if err == nil && !fi.IsDir() {
+ return filepath, nil
+ }
+ }
+ }
+
+ return "", fmt.Errorf("%s: could not find export data (tried %s)", pkgpath, strings.Join(searchpaths, ":"))
+}
+
+const (
+ gccgov1Magic = "v1;\n"
+ goimporterMagic = "\n$$ "
+ archiveMagic = "!<ar"
+)
+ if len(test.wantinits) > 0 {
+ initdata := initmap[pkg]
+ found := false
+ // Check that the package's own init function has the package's priority
+ for _, pkginit := range initdata.Inits {
+ if pkginit.InitFunc == test.wantinits[0] {
+ if initdata.Priority != pkginit.Priority {
+ t.Errorf("%s: got self priority %d; want %d", test.pkgpath, pkginit.Priority, initdata.Priority)
+ }
+ found = true
+ break
+ }
+ }
+
+ if !found {
+ t.Errorf("%s: could not find expected function %q", test.pkgpath, test.wantinits[0])
+ }
+
+ // Each init function in the list other than the first one is a
+ // dependency of the function immediately before it. Check that
+ // the init functions appear in descending priority order.
+ priority := initdata.Priority
+ for _, wantdepinit := range test.wantinits[1:] {
+ found = false
+ for _, pkginit := range initdata.Inits {
+ if pkginit.InitFunc == wantdepinit {
+ if priority <= pkginit.Priority {
+ t.Errorf("%s: got dep priority %d; want less than %d", test.pkgpath, pkginit.Priority, priority)
+ }
+ found = true
+ priority = pkginit.Priority
+ break
+ }
+ }
+
+ if !found {
+ t.Errorf("%s: could not find expected function %q", test.pkgpath, wantdepinit)
+ }
+ }
+ }
+}
+
+var importerTests = [...]importerTest{
+ {pkgpath: "pointer", name: "Int8Ptr", want: "type Int8Ptr *int8"},
+ {pkgpath: "complexnums", name: "NN", want: "const NN untyped complex", wantval: "(-1/1 + -1/1i)"},
+ {pkgpath: "complexnums", name: "NP", want: "const NP untyped complex", wantval: "(-1/1 + 1/1i)"},
+ {pkgpath: "complexnums", name: "PN", want: "const PN untyped complex", wantval: "(1/1 + -1/1i)"},
+ {pkgpath: "complexnums", name: "PP", want: "const PP untyped complex", wantval: "(1/1 + 1/1i)"},
+ {pkgpath: "imports", wantinits: []string{"imports..import", "fmt..import", "math..import"}},
+}
+
+func TestGoxImporter(t *testing.T) {
+ initmap := make(map[*types.Package]InitData)
+ imp := GetImporter([]string{"testdata"}, initmap)
+
+ for _, test := range importerTests {
+ runImporterTest(t, imp, initmap, &test)
+ }
+}
+
+func TestObjImporter(t *testing.T) {
+ // This test relies on gccgo being around, which it most likely will be if we
+ // were compiled with gccgo.
+ if runtime.Compiler != "gccgo" {
+ t.Skip("This test needs gccgo")
+ return
+ }
+
+ tmpdir, err := ioutil.TempDir("", "")
+ if err != nil {
+ t.Fatal(err)
+ }
+ initmap := make(map[*types.Package]InitData)
+ imp := GetImporter([]string{tmpdir}, initmap)
+
+ artmpdir, err := ioutil.TempDir("", "")
+ if err != nil {
+ t.Fatal(err)
+ }
+ arinitmap := make(map[*types.Package]InitData)
+ arimp := GetImporter([]string{artmpdir}, arinitmap)
+
+ for _, test := range importerTests {
+ gofile := filepath.Join("testdata", test.pkgpath+".go")
+ ofile := filepath.Join(tmpdir, test.pkgpath+".o")
+ afile := filepath.Join(artmpdir, "lib"+test.pkgpath+".a")
+
+ cmd := exec.Command("gccgo", "-fgo-pkgpath="+test.pkgpath, "-c", "-o", ofile, gofile)
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ t.Logf("%s", out)
+ t.Fatalf("gccgo %s failed: %s", gofile, err)
+ }
+
+ runImporterTest(t, imp, initmap, &test)
+
+ cmd = exec.Command("ar", "cr", afile, ofile)
+ out, err = cmd.CombinedOutput()
+ if err != nil {
+ t.Logf("%s", out)
+ t.Fatalf("ar cr %s %s failed: %s", afile, ofile, err)
+ }
+
+ runImporterTest(t, arimp, arinitmap, &test)
+
+ if err = os.Remove(ofile); err != nil {
+ t.Fatal(err)
+ }
+ if err = os.Remove(afile); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ if err = os.Remove(tmpdir); err != nil {
+ t.Fatal(err)
+ }
+}
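
To make the init-data plumbing concrete, a hedged sketch of reading InitData through GetImporter, mirroring TestGoxImporter above; it assumes the testdata directory from this patch is the working directory.

    package main

    import (
    	"fmt"
    	"log"

    	"llvm.org/llgo/third_party/go.tools/go/gccgoimporter"
    	"llvm.org/llgo/third_party/go.tools/go/types"
    )

    func main() {
    	initmap := make(map[*types.Package]gccgoimporter.InitData)
    	imp := gccgoimporter.GetImporter([]string{"testdata"}, initmap)

    	pkg, err := imp(make(map[string]*types.Package), "imports")
    	if err != nil {
    		log.Fatal(err)
    	}

    	// Each imported package records its own init priority and the init
    	// functions of the packages it depends on.
    	id := initmap[pkg]
    	fmt.Println(pkg.Path(), "priority", id.Priority)
    	for _, init := range id.Inits {
    		fmt.Println(" ", init.Name, init.InitFunc, init.Priority)
    	}
    }
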
diff --git a/llgo/third_party/go.tools/go/gccgoimporter/parser.go b/llgo/third_party/go.tools/go/gccgoimporter/parser.go
new file mode 100644
index 0000000000000000000000000000000000000000..0eb88c46ef51c55dc4e2282f3cf55acb998c25a0
--- /dev/null
+++ b/llgo/third_party/go.tools/go/gccgoimporter/parser.go
@@ -0,0 +1,856 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gccgoimporter
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "go/token"
+ "io"
+ "strconv"
+ "strings"
+ "text/scanner"
+
+ "llvm.org/llgo/third_party/go.tools/go/exact"
+ "llvm.org/llgo/third_party/go.tools/go/types"
+)
+
+type parser struct {
+ scanner scanner.Scanner
+ tok rune // current token
+ lit string // literal string; only valid for Ident, Int, String tokens
+ pkgpath string // package path of imported package
+ pkgname string // name of imported package
+ pkg *types.Package // reference to imported package
+ imports map[string]*types.Package // package path -> package object
+ typeMap map[int]types.Type // type number -> type
+ initdata InitData // package init priority data
+}
+
+func (p *parser) init(filename string, src io.Reader, imports map[string]*types.Package) {
+ p.scanner.Init(src)
+ p.scanner.Error = func(_ *scanner.Scanner, msg string) { p.error(msg) }
+ p.scanner.Mode = scanner.ScanIdents | scanner.ScanInts | scanner.ScanFloats | scanner.ScanStrings | scanner.ScanComments | scanner.SkipComments
+ p.scanner.Whitespace = 1<<'\t' | 1<<'\n' | 1<<' '
+ p.scanner.Filename = filename // for good error messages
+ p.next()
+ p.imports = imports
+ p.typeMap = make(map[int]types.Type)
+}
+
+type importError struct {
+ pos scanner.Position
+ err error
+}
+
+func (e importError) Error() string {
+ return fmt.Sprintf("import error %s (byte offset = %d): %s", e.pos, e.pos.Offset, e.err)
+}
+
+func (p *parser) error(err interface{}) {
+ if s, ok := err.(string); ok {
+ err = errors.New(s)
+ }
+ // panic with a runtime.Error if err is not an error
+ panic(importError{p.scanner.Pos(), err.(error)})
+}
+
+func (p *parser) errorf(format string, args ...interface{}) {
+ p.error(fmt.Errorf(format, args...))
+}
+
+func (p *parser) expect(tok rune) string {
+ lit := p.lit
+ if p.tok != tok {
+ p.errorf("expected %s, got %s (%s)", scanner.TokenString(tok), scanner.TokenString(p.tok), lit)
+ }
+ p.next()
+ return lit
+}
+
+func (p *parser) expectKeyword(keyword string) {
+ lit := p.expect(scanner.Ident)
+ if lit != keyword {
+ p.errorf("expected keyword %s, got %q", keyword, lit)
+ }
+}
+
+func (p *parser) parseString() string {
+ str, err := strconv.Unquote(p.expect(scanner.String))
+ if err != nil {
+ p.error(err)
+ }
+ return str
+}
+
+// unquotedString = { unquotedStringChar } .
+// unquotedStringChar = <neither a whitespace nor a ';' character> .
+func (p *parser) parseUnquotedString() string {
+ if p.tok == scanner.EOF {
+ p.error("unexpected EOF")
+ }
+ var buf bytes.Buffer
+ buf.WriteString(p.scanner.TokenText())
+ // This loop needs to examine each character before deciding whether to consume it. If we see a semicolon,
+ // we need to let it be consumed by p.next().
+ for ch := p.scanner.Peek(); ch != ';' && ch != scanner.EOF && p.scanner.Whitespace&(1<<uint(ch)) == 0; ch = p.scanner.Peek() {
+ buf.WriteRune(ch)
+ p.scanner.Next()
+ }
+ p.next()
+ return buf.String()
+}
+ if len(list) > 0 {
+ p.expect(',')
+ }
+ par, variadic := p.parseParam(pkg)
+ list = append(list, par)
+ if variadic {
+ if isVariadic {
+ p.error("... not on final argument")
+ }
+ isVariadic = true
+ }
+ }
+ p.expect(')')
+
+ return types.NewTuple(list...), isVariadic
+}
+
+// ResultList = Type | ParamList .
+func (p *parser) parseResultList(pkg *types.Package) *types.Tuple {
+ switch p.tok {
+ case '<':
+ return types.NewTuple(types.NewParam(token.NoPos, pkg, "", p.parseType(pkg)))
+
+ case '(':
+ params, _ := p.parseParamList(pkg)
+ return params
+
+ default:
+ return nil
+ }
+}
+
+// FunctionType = ParamList ResultList .
+func (p *parser) parseFunctionType(pkg *types.Package) *types.Signature {
+ params, isVariadic := p.parseParamList(pkg)
+ results := p.parseResultList(pkg)
+ return types.NewSignature(pkg.Scope(), nil, params, results, isVariadic)
+}
+
+// Func = Name FunctionType .
+func (p *parser) parseFunc(pkg *types.Package) *types.Func {
+ name := p.parseName()
+ if strings.ContainsRune(name, '$') {
+ // This is a Type$equal or Type$hash function, which we don't want to parse,
+ // except for the types.
+ p.discardDirectiveWhileParsingTypes(pkg)
+ return nil
+ }
+ return types.NewFunc(token.NoPos, pkg, name, p.parseFunctionType(pkg))
+}
+
+// InterfaceType = "interface" "{" { ("?" Type | Func) ";" } "}" .
+func (p *parser) parseInterfaceType(pkg *types.Package) types.Type {
+ p.expectKeyword("interface")
+
+ var methods []*types.Func
+ var typs []*types.Named
+
+ p.expect('{')
+ for p.tok != '}' && p.tok != scanner.EOF {
+ if p.tok == '?' {
+ p.next()
+ typs = append(typs, p.parseType(pkg).(*types.Named))
+ } else {
+ method := p.parseFunc(pkg)
+ methods = append(methods, method)
+ }
+ p.expect(';')
+ }
+ p.expect('}')
+
+ return types.NewInterface(methods, typs)
+}
+
+// PointerType = "*" ("any" | Type) .
+func (p *parser) parsePointerType(pkg *types.Package) types.Type {
+ p.expect('*')
+ if p.tok == scanner.Ident {
+ p.expectKeyword("any")
+ return types.Typ[types.UnsafePointer]
+ }
+ return types.NewPointer(p.parseType(pkg))
+}
+
+// TypeDefinition = NamedType | MapType | ChanType | StructType | InterfaceType | PointerType | ArrayOrSliceType | FunctionType .
+func (p *parser) parseTypeDefinition(pkg *types.Package, n int) types.Type {
+ var t types.Type
+ switch p.tok {
+ case scanner.String:
+ t = p.parseNamedType(n)
+
+ case scanner.Ident:
+ switch p.lit {
+ case "map":
+ t = p.parseMapType(pkg)
+
+ case "chan":
+ t = p.parseChanType(pkg)
+
+ case "struct":
+ t = p.parseStructType(pkg)
+
+ case "interface":
+ t = p.parseInterfaceType(pkg)
+ }
+
+ case '*':
+ t = p.parsePointerType(pkg)
+
+ case '[':
+ t = p.parseArrayOrSliceType(pkg)
+
+ case '(':
+ t = p.parseFunctionType(pkg)
+ }
+
+ p.typeMap[n] = t
+ return t
+}
+
+const (
+ // From gofrontend/go/export.h
+ // Note that these values are negative in the gofrontend and have been made positive
+ // in the gccgoimporter.
+ gccgoBuiltinINT8 = 1
+ gccgoBuiltinINT16 = 2
+ gccgoBuiltinINT32 = 3
+ gccgoBuiltinINT64 = 4
+ gccgoBuiltinUINT8 = 5
+ gccgoBuiltinUINT16 = 6
+ gccgoBuiltinUINT32 = 7
+ gccgoBuiltinUINT64 = 8
+ gccgoBuiltinFLOAT32 = 9
+ gccgoBuiltinFLOAT64 = 10
+ gccgoBuiltinINT = 11
+ gccgoBuiltinUINT = 12
+ gccgoBuiltinUINTPTR = 13
+ gccgoBuiltinBOOL = 15
+ gccgoBuiltinSTRING = 16
+ gccgoBuiltinCOMPLEX64 = 17
+ gccgoBuiltinCOMPLEX128 = 18
+ gccgoBuiltinERROR = 19
+ gccgoBuiltinBYTE = 20
+ gccgoBuiltinRUNE = 21
+)
+
+func lookupBuiltinType(typ int) types.Type {
+ return [...]types.Type{
+ gccgoBuiltinINT8: types.Typ[types.Int8],
+ gccgoBuiltinINT16: types.Typ[types.Int16],
+ gccgoBuiltinINT32: types.Typ[types.Int32],
+ gccgoBuiltinINT64: types.Typ[types.Int64],
+ gccgoBuiltinUINT8: types.Typ[types.Uint8],
+ gccgoBuiltinUINT16: types.Typ[types.Uint16],
+ gccgoBuiltinUINT32: types.Typ[types.Uint32],
+ gccgoBuiltinUINT64: types.Typ[types.Uint64],
+ gccgoBuiltinFLOAT32: types.Typ[types.Float32],
+ gccgoBuiltinFLOAT64: types.Typ[types.Float64],
+ gccgoBuiltinINT: types.Typ[types.Int],
+ gccgoBuiltinUINT: types.Typ[types.Uint],
+ gccgoBuiltinUINTPTR: types.Typ[types.Uintptr],
+ gccgoBuiltinBOOL: types.Typ[types.Bool],
+ gccgoBuiltinSTRING: types.Typ[types.String],
+ gccgoBuiltinCOMPLEX64: types.Typ[types.Complex64],
+ gccgoBuiltinCOMPLEX128: types.Typ[types.Complex128],
+ gccgoBuiltinERROR: types.Universe.Lookup("error").Type(),
+ gccgoBuiltinBYTE: types.Typ[types.Byte],
+ gccgoBuiltinRUNE: types.Typ[types.Rune],
+ }[typ]
+}
+
+// Type = "<" "type" ( "-" int | int [ TypeDefinition ] ) ">" .
+func (p *parser) parseType(pkg *types.Package) (t types.Type) {
+ p.expect('<')
+ p.expectKeyword("type")
+
+ switch p.tok {
+ case scanner.Int:
+ n := p.parseInt()
+
+ if p.tok == '>' {
+ t = p.typeMap[int(n)]
+ } else {
+ t = p.parseTypeDefinition(pkg, int(n))
+ }
+
+ case '-':
+ p.next()
+ n := p.parseInt()
+ t = lookupBuiltinType(int(n))
+
+ default:
+ p.errorf("expected type number, got %s (%q)", scanner.TokenString(p.tok), p.lit)
+ return nil
+ }
+
+ p.expect('>')
+ return
+}
+
+// PackageInit = unquotedString unquotedString int .
+func (p *parser) parsePackageInit() PackageInit {
+ name := p.parseUnquotedString()
+ initfunc := p.parseUnquotedString()
+ priority := int(p.parseInt())
+ return PackageInit{Name: name, InitFunc: initfunc, Priority: priority}
+}
+
+// Throw away tokens until we see a ';'. If we see a '<', attempt to parse as a type.
+func (p *parser) discardDirectiveWhileParsingTypes(pkg *types.Package) {
+ for {
+ switch p.tok {
+ case ';':
+ return
+ case '<':
+ p.parseType(p.pkg)
+ case scanner.EOF:
+ p.error("unexpected EOF")
+ default:
+ p.next()
+ }
+ }
+}
+
+// Create the package if we have parsed both the package path and package name.
+func (p *parser) maybeCreatePackage() {
+ if p.pkgname != "" && p.pkgpath != "" {
+ p.pkg = p.getPkg(p.pkgpath, p.pkgname)
+ }
+}
+
+// InitDataDirective = "v1" ";" |
+// "priority" int ";" |
+// "init" { PackageInit } ";" |
+// "checksum" unquotedString ";" .
+func (p *parser) parseInitDataDirective() {
+ if p.tok != scanner.Ident {
+ // unexpected token kind; panic
+ p.expect(scanner.Ident)
+ }
+
+ switch p.lit {
+ case "v1":
+ p.next()
+ p.expect(';')
+
+ case "priority":
+ p.next()
+ p.initdata.Priority = int(p.parseInt())
+ p.expect(';')
+
+ case "init":
+ p.next()
+ for p.tok != ';' && p.tok != scanner.EOF {
+ p.initdata.Inits = append(p.initdata.Inits, p.parsePackageInit())
+ }
+ p.expect(';')
+
+ case "checksum":
+ // Don't let the scanner try to parse the checksum as a number.
+ defer func(mode uint) {
+ p.scanner.Mode = mode
+ }(p.scanner.Mode)
+ p.scanner.Mode &^= scanner.ScanInts | scanner.ScanFloats
+ p.next()
+ p.parseUnquotedString()
+ p.expect(';')
+
+ default:
+ p.errorf("unexpected identifier: %q", p.lit)
+ }
+}
+
+// Directive = InitDataDirective |
+// "package" unquotedString ";" |
+// "pkgpath" unquotedString ";" |
+// "import" unquotedString unquotedString string ";" |
+// "func" Func ";" |
+// "type" Type ";" |
+// "var" Var ";" |
+// "const" Const ";" .
+func (p *parser) parseDirective() {
+ if p.tok != scanner.Ident {
+ // unexpected token kind; panic
+ p.expect(scanner.Ident)
+ }
+
+ switch p.lit {
+ case "v1", "priority", "init", "checksum":
+ p.parseInitDataDirective()
+
+ case "package":
+ p.next()
+ p.pkgname = p.parseUnquotedString()
+ p.maybeCreatePackage()
+ p.expect(';')
+
+ case "pkgpath":
+ p.next()
+ p.pkgpath = p.parseUnquotedString()
+ p.maybeCreatePackage()
+ p.expect(';')
+
+ case "import":
+ p.next()
+ pkgname := p.parseUnquotedString()
+ pkgpath := p.parseUnquotedString()
+ p.getPkg(pkgpath, pkgname)
+ p.parseString()
+ p.expect(';')
+
+ case "func":
+ p.next()
+ fun := p.parseFunc(p.pkg)
+ if fun != nil {
+ p.pkg.Scope().Insert(fun)
+ }
+ p.expect(';')
+
+ case "type":
+ p.next()
+ p.parseType(p.pkg)
+ p.expect(';')
+
+ case "var":
+ p.next()
+ v := p.parseVar(p.pkg)
+ p.pkg.Scope().Insert(v)
+ p.expect(';')
+
+ case "const":
+ p.next()
+ c := p.parseConst(p.pkg)
+ p.pkg.Scope().Insert(c)
+ p.expect(';')
+
+ default:
+ p.errorf("unexpected identifier: %q", p.lit)
+ }
+}
+
+// Package = { Directive } .
+func (p *parser) parsePackage() *types.Package {
+ for p.tok != scanner.EOF {
+ p.parseDirective()
+ }
+ for _, typ := range p.typeMap {
+ if it, ok := typ.(*types.Interface); ok {
+ it.Complete()
+ }
+ }
+ p.pkg.MarkComplete()
+ return p.pkg
+}
+
+// InitData = { InitDataDirective } .
+func (p *parser) parseInitData() {
+ for p.tok != scanner.EOF {
+ p.parseInitDataDirective()
+ }
+}
diff --git a/llgo/third_party/go.tools/go/gccgoimporter/parser_test.go b/llgo/third_party/go.tools/go/gccgoimporter/parser_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..2f5e4496eb75edc93ca993b35363e026891811c6
--- /dev/null
+++ b/llgo/third_party/go.tools/go/gccgoimporter/parser_test.go
@@ -0,0 +1,73 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gccgoimporter
+
+import (
+ "bytes"
+ "strings"
+ "testing"
+ "text/scanner"
+
+ "llvm.org/llgo/third_party/go.tools/go/types"
+)
+
+var typeParserTests = []struct {
+ id, typ, want, underlying, methods string
+}{
+ {id: "foo", typ: "", want: "int8"},
+ {id: "foo", typ: ">", want: "*error"},
+ {id: "foo", typ: "", want: "unsafe.Pointer"},
+ {id: "foo", typ: ">>", want: "foo.Bar", underlying: "*foo.Bar"},
+ {id: "foo", typ: " func (? ) M (); >", want: "bar.Foo", underlying: "int8", methods: "func (bar.Foo).M()"},
+ {id: "foo", typ: ">", want: "bar.foo", underlying: "int8"},
+ {id: "foo", typ: ">", want: "[]int8"},
+ {id: "foo", typ: ">", want: "[42]int8"},
+ {id: "foo", typ: "] >", want: "map[int8]int16"},
+ {id: "foo", typ: ">", want: "chan int8"},
+ {id: "foo", typ: ">", want: "<-chan int8"},
+ {id: "foo", typ: ">", want: "chan<- int8"},
+ {id: "foo", typ: "; I16 \"i16\"; }>", want: "struct{I8 int8; I16 int16 \"i16\"}"},
+ {id: "foo", typ: ", b ) ; Bar (? , ? ...) (? , ? ); Baz (); }>", want: "interface{Bar(int16, ...int8) (int16, int8); Baz(); Foo(a int8, b int16) int8}"},
+ {id: "foo", typ: ") >", want: "func(int8) int16"},
+}
+
+func TestTypeParser(t *testing.T) {
+ for _, test := range typeParserTests {
+ var p parser
+ p.init("test.gox", strings.NewReader(test.typ), make(map[string]*types.Package))
+ p.pkgname = test.id
+ p.pkgpath = test.id
+ p.maybeCreatePackage()
+ typ := p.parseType(p.pkg)
+
+ if p.tok != scanner.EOF {
+ t.Errorf("expected full parse, stopped at %q", p.lit)
+ }
+
+ got := typ.String()
+ if got != test.want {
+ t.Errorf("got type %q, expected %q", got, test.want)
+ }
+
+ if test.underlying != "" {
+ underlying := typ.Underlying().String()
+ if underlying != test.underlying {
+ t.Errorf("got underlying type %q, expected %q", underlying, test.underlying)
+ }
+ }
+
+ if test.methods != "" {
+ nt := typ.(*types.Named)
+ var buf bytes.Buffer
+ for i := 0; i != nt.NumMethods(); i++ {
+ buf.WriteString(nt.Method(i).String())
+ }
+ methods := buf.String()
+ if methods != test.methods {
+ t.Errorf("got methods %q, expected %q", methods, test.methods)
+ }
+ }
+ }
+}
diff --git a/llgo/third_party/go.tools/go/gccgoimporter/testdata/complexnums.go b/llgo/third_party/go.tools/go/gccgoimporter/testdata/complexnums.go
new file mode 100644
index 0000000000000000000000000000000000000000..a51b6b01c0be3ac95ea5436aecbead5b69f21cc5
--- /dev/null
+++ b/llgo/third_party/go.tools/go/gccgoimporter/testdata/complexnums.go
@@ -0,0 +1,6 @@
+package complexnums
+
+const NN = -1 - 1i
+const NP = -1 + 1i
+const PN = 1 - 1i
+const PP = 1 + 1i
diff --git a/llgo/third_party/go.tools/go/gccgoimporter/testdata/complexnums.gox b/llgo/third_party/go.tools/go/gccgoimporter/testdata/complexnums.gox
new file mode 100644
index 0000000000000000000000000000000000000000..b66524f80edd51d9837b59cae36f73a9fa4f4e18
--- /dev/null
+++ b/llgo/third_party/go.tools/go/gccgoimporter/testdata/complexnums.gox
@@ -0,0 +1,8 @@
+v1;
+package complexnums;
+pkgpath complexnums;
+priority 1;
+const NN = -0.1E1-0.1E1i ;
+const NP = -0.1E1+0.1E1i ;
+const PN = 0.1E1-0.1E1i ;
+const PP = 0.1E1+0.1E1i ;
diff --git a/llgo/third_party/go.tools/go/gccgoimporter/testdata/imports.go b/llgo/third_party/go.tools/go/gccgoimporter/testdata/imports.go
new file mode 100644
index 0000000000000000000000000000000000000000..7907316a607fc48a91acf63318a863d481a2de86
--- /dev/null
+++ b/llgo/third_party/go.tools/go/gccgoimporter/testdata/imports.go
@@ -0,0 +1,5 @@
+package imports
+
+import "fmt"
+
+var Hello = fmt.Sprintf("Hello, world")
diff --git a/llgo/third_party/go.tools/go/gccgoimporter/testdata/imports.gox b/llgo/third_party/go.tools/go/gccgoimporter/testdata/imports.gox
new file mode 100644
index 0000000000000000000000000000000000000000..958a4f5b82109a94bee63ebdca66c6aea0b89dda
--- /dev/null
+++ b/llgo/third_party/go.tools/go/gccgoimporter/testdata/imports.gox
@@ -0,0 +1,7 @@
+v1;
+package imports;
+pkgpath imports;
+priority 7;
+import fmt fmt "fmt";
+init imports imports..import 7 math math..import 1 runtime runtime..import 1 strconv strconv..import 2 io io..import 3 reflect reflect..import 3 syscall syscall..import 3 time time..import 4 os os..import 5 fmt fmt..import 6;
+var Hello ;
diff --git a/llgo/third_party/go.tools/go/gccgoimporter/testdata/pointer.go b/llgo/third_party/go.tools/go/gccgoimporter/testdata/pointer.go
new file mode 100644
index 0000000000000000000000000000000000000000..4ebc67137d68bce06a609f6680341db7043d2a48
--- /dev/null
+++ b/llgo/third_party/go.tools/go/gccgoimporter/testdata/pointer.go
@@ -0,0 +1,3 @@
+package pointer
+
+type Int8Ptr *int8
diff --git a/llgo/third_party/go.tools/go/gccgoimporter/testdata/pointer.gox b/llgo/third_party/go.tools/go/gccgoimporter/testdata/pointer.gox
new file mode 100644
index 0000000000000000000000000000000000000000..d96ebbdd1418933ac6919ec05efca99ae0508345
--- /dev/null
+++ b/llgo/third_party/go.tools/go/gccgoimporter/testdata/pointer.gox
@@ -0,0 +1,4 @@
+v1;
+package pointer;
+pkgpath pointer;
+type >>;
diff --git a/llgo/third_party/go.tools/go/gcimporter/exportdata.go b/llgo/third_party/go.tools/go/gcimporter/exportdata.go
new file mode 100644
index 0000000000000000000000000000000000000000..657742bb6d79dc9f3d852e1f7e05102aaa618f4a
--- /dev/null
+++ b/llgo/third_party/go.tools/go/gcimporter/exportdata.go
@@ -0,0 +1,108 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements FindExportData.
+
+package gcimporter
+
+import (
+ "bufio"
+ "errors"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+)
+
+func readGopackHeader(r *bufio.Reader) (name string, size int, err error) {
+ // See $GOROOT/include/ar.h.
+ hdr := make([]byte, 16+12+6+6+8+10+2)
+ _, err = io.ReadFull(r, hdr)
+ if err != nil {
+ return
+ }
+ // leave for debugging
+ if false {
+ fmt.Printf("header: %s", hdr)
+ }
+ s := strings.TrimSpace(string(hdr[16+12+6+6+8:][:10]))
+ size, err = strconv.Atoi(s)
+ if err != nil || hdr[len(hdr)-2] != '`' || hdr[len(hdr)-1] != '\n' {
+ err = errors.New("invalid archive header")
+ return
+ }
+ name = strings.TrimSpace(string(hdr[:16]))
+ return
+}
+
+// FindExportData positions the reader r at the beginning of the
+// export data section of an underlying GC-created object/archive
+// file by reading from it. The reader must be positioned at the
+// start of the file before calling this function.
+//
+func FindExportData(r *bufio.Reader) (err error) {
+ // Read first line to make sure this is an object file.
+ line, err := r.ReadSlice('\n')
+ if err != nil {
+ return
+ }
+ if string(line) == "!\n" {
+ // Archive file. Scan to __.PKGDEF.
+ var name string
+ var size int
+ if name, size, err = readGopackHeader(r); err != nil {
+ return
+ }
+
+ // Optional leading __.GOSYMDEF or __.SYMDEF.
+ // Read and discard.
+ if name == "__.SYMDEF" || name == "__.GOSYMDEF" {
+ const block = 4096
+ tmp := make([]byte, block)
+ for size > 0 {
+ n := size
+ if n > block {
+ n = block
+ }
+ if _, err = io.ReadFull(r, tmp[:n]); err != nil {
+ return
+ }
+ size -= n
+ }
+
+ if name, size, err = readGopackHeader(r); err != nil {
+ return
+ }
+ }
+
+ // First real entry should be __.PKGDEF.
+ if name != "__.PKGDEF" {
+ err = errors.New("go archive is missing __.PKGDEF")
+ return
+ }
+
+ // Read first line of __.PKGDEF data, so that line
+ // is once again the first line of the input.
+ if line, err = r.ReadSlice('\n'); err != nil {
+ return
+ }
+ }
+
+ // Now at __.PKGDEF in archive or still at beginning of file.
+ // Either way, line should begin with "go object ".
+ if !strings.HasPrefix(string(line), "go object ") {
+ err = errors.New("not a go object file")
+ return
+ }
+
+ // Skip over object header to export data.
+ // Begins after first line with $$.
+ for line[0] != '$' {
+ if line, err = r.ReadSlice('\n'); err != nil {
+ return
+ }
+ }
+
+ return
+}
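+
+// exampleFindExportData is an editor's illustrative sketch, not part of the
+// upstream file: it shows the intended calling pattern for FindExportData.
+// The src argument stands for a hypothetical, already-opened object or
+// archive file positioned at its start.
+func exampleFindExportData(src io.Reader) error {
+ buf := bufio.NewReader(src)
+ if err := FindExportData(buf); err != nil {
+ return fmt.Errorf("object file contains no export data: %v", err)
+ }
+ // buf now reads from the start of the export data section; a parser such
+ // as ImportData in gcimporter.go would take over from here.
+ return nil
+}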
diff --git a/llgo/third_party/go.tools/go/gcimporter/gcimporter.go b/llgo/third_party/go.tools/go/gcimporter/gcimporter.go
new file mode 100644
index 0000000000000000000000000000000000000000..45668a0472aea00d3b589d6b342f4f7b492abb2a
--- /dev/null
+++ b/llgo/third_party/go.tools/go/gcimporter/gcimporter.go
@@ -0,0 +1,961 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package gcimporter implements Import for gc-generated object files.
+// Importing this package installs Import as go/types.DefaultImport.
+package gcimporter
+
+import (
+ "bufio"
+ "errors"
+ "fmt"
+ "go/build"
+ "go/token"
+ "io"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "text/scanner"
+
+ "llvm.org/llgo/third_party/go.tools/go/exact"
+ "llvm.org/llgo/third_party/go.tools/go/types"
+)
+
+// debugging/development support
+const debug = false
+
+func init() {
+ types.DefaultImport = Import
+}
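+
+// Editor's note (illustrative, not part of the upstream source): because the
+// init function above registers Import as the default importer, clients
+// typically import this package only for its side effect, e.g.
+//
+// import _ "llvm.org/llgo/third_party/go.tools/go/gcimporter"
+//
+// after which the types package resolves imports through types.DefaultImport.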
+
+var pkgExts = [...]string{".a", ".5", ".6", ".8"}
+
+// FindPkg returns the filename and unique package id for an import
+// path based on package information provided by build.Import (using
+// the build.Default build.Context).
+// If no file was found, an empty filename is returned.
+//
+func FindPkg(path, srcDir string) (filename, id string) {
+ if len(path) == 0 {
+ return
+ }
+
+ id = path
+ var noext string
+ switch {
+ default:
+ // "x" -> "$GOPATH/pkg/$GOOS_$GOARCH/x.ext", "x"
+ // Don't require the source files to be present.
+ bp, _ := build.Import(path, srcDir, build.FindOnly|build.AllowBinary)
+ if bp.PkgObj == "" {
+ return
+ }
+ noext = strings.TrimSuffix(bp.PkgObj, ".a")
+
+ case build.IsLocalImport(path):
+ // "./x" -> "/this/directory/x.ext", "/this/directory/x"
+ noext = filepath.Join(srcDir, path)
+ id = noext
+
+ case filepath.IsAbs(path):
+ // for completeness only - go/build.Import
+ // does not support absolute imports
+ // "/x" -> "/x.ext", "/x"
+ noext = path
+ }
+
+ // try extensions
+ for _, ext := range pkgExts {
+ filename = noext + ext
+ if f, err := os.Stat(filename); err == nil && !f.IsDir() {
+ return
+ }
+ }
+
+ filename = "" // not found
+ return
+}
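+
+// exampleFindPkg is an editor's illustrative sketch, not part of the upstream
+// file: it shows how FindPkg maps an import path to the compiled package file
+// that Import later opens; "fmt" is used only as a sample path.
+func exampleFindPkg() {
+ filename, id := FindPkg("fmt", ".")
+ if filename == "" {
+ fmt.Println("no compiled package file found for", id)
+ return
+ }
+ fmt.Println("export data would be read from", filename)
+}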
+
+// ImportData imports a package by reading the gc-generated export data,
+// adds the corresponding package object to the imports map indexed by id,
+// and returns the object.
+//
+// The imports map must contain all packages already imported. The data
+// reader position must be the beginning of the export data section. The
+// filename is only used in error messages.
+//
+// If imports[id] contains the completely imported package, that package
+// can be used directly, and there is no need to call this function (doing
+// so anyway is harmless apart from the extra time spent).
+//
+func ImportData(imports map[string]*types.Package, filename, id string, data io.Reader) (pkg *types.Package, err error) {
+ // support for parser error handling
+ defer func() {
+ switch r := recover().(type) {
+ case nil:
+ // nothing to do
+ case importError:
+ err = r
+ default:
+ panic(r) // internal error
+ }
+ }()
+
+ var p parser
+ p.init(filename, id, data, imports)
+ pkg = p.parseExport()
+
+ return
+}
+
+// Import imports a gc-generated package given its import path, adds the
+// corresponding package object to the imports map, and returns the object.
+// Local import paths are interpreted relative to the current working directory.
+// The imports map must contain all packages already imported.
+//
+func Import(imports map[string]*types.Package, path string) (pkg *types.Package, err error) {
+ if path == "unsafe" {
+ return types.Unsafe, nil
+ }
+
+ srcDir := "."
+ if build.IsLocalImport(path) {
+ srcDir, err = os.Getwd()
+ if err != nil {
+ return
+ }
+ }
+
+ filename, id := FindPkg(path, srcDir)
+ if filename == "" {
+ err = fmt.Errorf("can't find import: %s", id)
+ return
+ }
+
+ // no need to re-import if the package was imported completely before
+ if pkg = imports[id]; pkg != nil && pkg.Complete() {
+ return
+ }
+
+ // open file
+ f, err := os.Open(filename)
+ if err != nil {
+ return
+ }
+ defer func() {
+ f.Close()
+ if err != nil {
+ // add file name to error
+ err = fmt.Errorf("reading export data: %s: %v", filename, err)
+ }
+ }()
+
+ buf := bufio.NewReader(f)
+ if err = FindExportData(buf); err != nil {
+ return
+ }
+
+ pkg, err = ImportData(imports, filename, id, buf)
+
+ return
+}
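+
+// exampleImport is an editor's illustrative sketch, not part of the upstream
+// file: a minimal use of Import that loads a package from its export data and
+// lists the exported names; "strings" is only a sample import path.
+func exampleImport() {
+ imports := make(map[string]*types.Package)
+ pkg, err := Import(imports, "strings")
+ if err != nil {
+ fmt.Println("import failed:", err)
+ return
+ }
+ for _, name := range pkg.Scope().Names() {
+ fmt.Println(pkg.Path(), name)
+ }
+}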
+
+// ----------------------------------------------------------------------------
+// Parser
+
+// TODO(gri) Imported objects don't have position information.
+// Ideally use the debug table line info; alternatively
+// create some fake position (or the position of the
+// import). That way error messages referring to imported
+// objects can print meaningful information.
+
+// parser parses the exports inside a gc compiler-produced
+// object/archive file and populates its scope with the results.
+type parser struct {
+ scanner scanner.Scanner
+ tok rune // current token
+ lit string // literal string; only valid for Ident, Int, String tokens
+ id string // package id of imported package
+ imports map[string]*types.Package // package id -> package object
+}
+
+func (p *parser) init(filename, id string, src io.Reader, imports map[string]*types.Package) {
+ p.scanner.Init(src)
+ p.scanner.Error = func(_ *scanner.Scanner, msg string) { p.error(msg) }
+ p.scanner.Mode = scanner.ScanIdents | scanner.ScanInts | scanner.ScanChars | scanner.ScanStrings | scanner.ScanComments | scanner.SkipComments
+ p.scanner.Whitespace = 1<<'\t' | 1<<' '
+ p.scanner.Filename = filename // for good error messages
+ p.next()
+ p.id = id
+ p.imports = imports
+ if debug {
+ // check consistency of imports map
+ for _, pkg := range imports {
+ if pkg.Name() == "" {
+ fmt.Printf("no package name for %s\n", pkg.Path())
+ }
+ }
+ }
+}
+
+func (p *parser) next() {
+ p.tok = p.scanner.Scan()
+ switch p.tok {
+ case scanner.Ident, scanner.Int, scanner.Char, scanner.String, '·':
+ p.lit = p.scanner.TokenText()
+ default:
+ p.lit = ""
+ }
+ if debug {
+ fmt.Printf("%s: %q -> %q\n", scanner.TokenString(p.tok), p.scanner.TokenText(), p.lit)
+ }
+}
+
+func declTypeName(pkg *types.Package, name string) *types.TypeName {
+ scope := pkg.Scope()
+ if obj := scope.Lookup(name); obj != nil {
+ return obj.(*types.TypeName)
+ }
+ obj := types.NewTypeName(token.NoPos, pkg, name, nil)
+ // a named type may be referred to before the underlying type
+ // is known - set it up
+ types.NewNamed(obj, nil, nil)
+ scope.Insert(obj)
+ return obj
+}
+
+// ----------------------------------------------------------------------------
+// Error handling
+
+// Internal errors are boxed as importErrors.
+type importError struct {
+ pos scanner.Position
+ err error
+}
+
+func (e importError) Error() string {
+ return fmt.Sprintf("import error %s (byte offset = %d): %s", e.pos, e.pos.Offset, e.err)
+}
+
+func (p *parser) error(err interface{}) {
+ if s, ok := err.(string); ok {
+ err = errors.New(s)
+ }
+ // panic with a runtime.Error if err is not an error
+ panic(importError{p.scanner.Pos(), err.(error)})
+}
+
+func (p *parser) errorf(format string, args ...interface{}) {
+ p.error(fmt.Sprintf(format, args...))
+}
+
+func (p *parser) expect(tok rune) string {
+ lit := p.lit
+ if p.tok != tok {
+ p.errorf("expected %s, got %s (%s)", scanner.TokenString(tok), scanner.TokenString(p.tok), lit)
+ }
+ p.next()
+ return lit
+}
+
+func (p *parser) expectSpecial(tok string) {
+ sep := 'x' // not white space
+ i := 0
+ for i < len(tok) && p.tok == rune(tok[i]) && sep > ' ' {
+ sep = p.scanner.Peek() // if sep <= ' ', there is white space before the next token
+ p.next()
+ i++
+ }
+ if i < len(tok) {
+ p.errorf("expected %q, got %q", tok, tok[0:i])
+ }
+}
+
+func (p *parser) expectKeyword(keyword string) {
+ lit := p.expect(scanner.Ident)
+ if lit != keyword {
+ p.errorf("expected keyword %s, got %q", keyword, lit)
+ }
+}
+
+// ----------------------------------------------------------------------------
+// Qualified and unqualified names
+
+// PackageId = string_lit .
+//
+func (p *parser) parsePackageId() string {
+ id, err := strconv.Unquote(p.expect(scanner.String))
+ if err != nil {
+ p.error(err)
+ }
+ // id == "" stands for the imported package id
+ // (only known at time of package installation)
+ if id == "" {
+ id = p.id
+ }
+ return id
+}
+
+// PackageName = ident .
+//
+func (p *parser) parsePackageName() string {
+ return p.expect(scanner.Ident)
+}
+
+// dotIdentifier = ( ident | '·' ) { ident | int | '·' } .
+func (p *parser) parseDotIdent() string {
+ ident := ""
+ if p.tok != scanner.Int {
+ sep := 'x' // not white space
+ for (p.tok == scanner.Ident || p.tok == scanner.Int || p.tok == '·') && sep > ' ' {
+ ident += p.lit
+ sep = p.scanner.Peek() // if sep <= ' ', there is white space before the next token
+ p.next()
+ }
+ }
+ if ident == "" {
+ p.expect(scanner.Ident) // use expect() for error handling
+ }
+ return ident
+}
+
+// QualifiedName = "@" PackageId "." ( "?" | dotIdentifier ) .
+//
+func (p *parser) parseQualifiedName() (id, name string) {
+ p.expect('@')
+ id = p.parsePackageId()
+ p.expect('.')
+ // Per rev f280b8a485fd (10/2/2013), qualified names may be used for anonymous fields.
+ if p.tok == '?' {
+ p.next()
+ } else {
+ name = p.parseDotIdent()
+ }
+ return
+}
+
+// getPkg returns the package for a given id. If the package is
+// not found but we have a package name, create the package and
+// add it to the p.imports map.
+//
+func (p *parser) getPkg(id, name string) *types.Package {
+ // package unsafe is not in the imports map - handle explicitly
+ if id == "unsafe" {
+ return types.Unsafe
+ }
+ pkg := p.imports[id]
+ if pkg == nil && name != "" {
+ pkg = types.NewPackage(id, name)
+ p.imports[id] = pkg
+ }
+ return pkg
+}
+
+// parseExportedName is like parseQualifiedName, but
+// the package id is resolved to an imported *types.Package.
+//
+func (p *parser) parseExportedName() (pkg *types.Package, name string) {
+ id, name := p.parseQualifiedName()
+ pkg = p.getPkg(id, "")
+ if pkg == nil {
+ p.errorf("%s package not found", id)
+ }
+ return
+}
+
+// ----------------------------------------------------------------------------
+// Types
+
+// BasicType = identifier .
+//
+func (p *parser) parseBasicType() types.Type {
+ id := p.expect(scanner.Ident)
+ obj := types.Universe.Lookup(id)
+ if obj, ok := obj.(*types.TypeName); ok {
+ return obj.Type()
+ }
+ p.errorf("not a basic type: %s", id)
+ return nil
+}
+
+// ArrayType = "[" int_lit "]" Type .
+//
+func (p *parser) parseArrayType() types.Type {
+ // "[" already consumed and lookahead known not to be "]"
+ lit := p.expect(scanner.Int)
+ p.expect(']')
+ elem := p.parseType()
+ n, err := strconv.ParseInt(lit, 10, 64)
+ if err != nil {
+ p.error(err)
+ }
+ return types.NewArray(elem, n)
+}
+
+// MapType = "map" "[" Type "]" Type .
+//
+func (p *parser) parseMapType() types.Type {
+ p.expectKeyword("map")
+ p.expect('[')
+ key := p.parseType()
+ p.expect(']')
+ elem := p.parseType()
+ return types.NewMap(key, elem)
+}
+
+// Name = identifier | "?" | QualifiedName .
+//
+// If materializePkg is set, the returned package is guaranteed to be set.
+// For fully qualified names, the returned package may be a fake package
+// (without name, scope, and not in the p.imports map), created for the
+// sole purpose of providing a package path. Fake packages are created
+// when the package id is not found in the p.imports map; in that case
+// we cannot create a real package because we don't have a package name.
+// For non-qualified names, the returned package is the imported package.
+//
+func (p *parser) parseName(materializePkg bool) (pkg *types.Package, name string) {
+ switch p.tok {
+ case scanner.Ident:
+ pkg = p.imports[p.id]
+ name = p.lit
+ p.next()
+ case '?':
+ // anonymous
+ pkg = p.imports[p.id]
+ p.next()
+ case '@':
+ // exported name prefixed with package path
+ var id string
+ id, name = p.parseQualifiedName()
+ if materializePkg {
+ // we don't have a package name - if the package
+ // doesn't exist yet, create a fake package instead
+ pkg = p.getPkg(id, "")
+ if pkg == nil {
+ pkg = types.NewPackage(id, "")
+ }
+ }
+ default:
+ p.error("name expected")
+ }
+ return
+}
+
+func deref(typ types.Type) types.Type {
+ if p, _ := typ.(*types.Pointer); p != nil {
+ return p.Elem()
+ }
+ return typ
+}
+
+// Field = Name Type [ string_lit ] .
+//
+func (p *parser) parseField() (*types.Var, string) {
+ pkg, name := p.parseName(true)
+ typ := p.parseType()
+ anonymous := false
+ if name == "" {
+ // anonymous field - typ must be T or *T and T must be a type name
+ switch typ := deref(typ).(type) {
+ case *types.Basic: // basic types are named types
+ pkg = nil
+ name = typ.Name()
+ case *types.Named:
+ name = typ.Obj().Name()
+ default:
+ p.errorf("anonymous field expected")
+ }
+ anonymous = true
+ }
+ tag := ""
+ if p.tok == scanner.String {
+ s := p.expect(scanner.String)
+ var err error
+ tag, err = strconv.Unquote(s)
+ if err != nil {
+ p.errorf("invalid struct tag %s: %s", s, err)
+ }
+ }
+ return types.NewField(token.NoPos, pkg, name, typ, anonymous), tag
+}
+
+// StructType = "struct" "{" [ FieldList ] "}" .
+// FieldList = Field { ";" Field } .
+//
+func (p *parser) parseStructType() types.Type {
+ var fields []*types.Var
+ var tags []string
+
+ p.expectKeyword("struct")
+ p.expect('{')
+ for i := 0; p.tok != '}' && p.tok != scanner.EOF; i++ {
+ if i > 0 {
+ p.expect(';')
+ }
+ fld, tag := p.parseField()
+ if tag != "" && tags == nil {
+ tags = make([]string, i)
+ }
+ if tags != nil {
+ tags = append(tags, tag)
+ }
+ fields = append(fields, fld)
+ }
+ p.expect('}')
+
+ return types.NewStruct(fields, tags)
+}
+
+// Parameter = ( identifier | "?" ) [ "..." ] Type [ string_lit ] .
+//
+func (p *parser) parseParameter() (par *types.Var, isVariadic bool) {
+ _, name := p.parseName(false)
+ // remove gc-specific parameter numbering
+ if i := strings.Index(name, "·"); i >= 0 {
+ name = name[:i]
+ }
+ if p.tok == '.' {
+ p.expectSpecial("...")
+ isVariadic = true
+ }
+ typ := p.parseType()
+ if isVariadic {
+ typ = types.NewSlice(typ)
+ }
+ // ignore argument tag (e.g. "noescape")
+ if p.tok == scanner.String {
+ p.next()
+ }
+ // TODO(gri) should we provide a package?
+ par = types.NewVar(token.NoPos, nil, name, typ)
+ return
+}
+
+// Parameters = "(" [ ParameterList ] ")" .
+// ParameterList = { Parameter "," } Parameter .
+//
+func (p *parser) parseParameters() (list []*types.Var, isVariadic bool) {
+ p.expect('(')
+ for p.tok != ')' && p.tok != scanner.EOF {
+ if len(list) > 0 {
+ p.expect(',')
+ }
+ par, variadic := p.parseParameter()
+ list = append(list, par)
+ if variadic {
+ if isVariadic {
+ p.error("... not on final argument")
+ }
+ isVariadic = true
+ }
+ }
+ p.expect(')')
+
+ return
+}
+
+// Signature = Parameters [ Result ] .
+// Result = Type | Parameters .
+//
+func (p *parser) parseSignature(recv *types.Var) *types.Signature {
+ params, isVariadic := p.parseParameters()
+
+ // optional result type
+ var results []*types.Var
+ if p.tok == '(' {
+ var variadic bool
+ results, variadic = p.parseParameters()
+ if variadic {
+ p.error("... not permitted on result type")
+ }
+ }
+
+ return types.NewSignature(nil, recv, types.NewTuple(params...), types.NewTuple(results...), isVariadic)
+}
+
+// InterfaceType = "interface" "{" [ MethodList ] "}" .
+// MethodList = Method { ";" Method } .
+// Method = Name Signature .
+//
+// The methods of embedded interfaces are always "inlined"
+// by the compiler and thus embedded interfaces are never
+// visible in the export data.
+//
+func (p *parser) parseInterfaceType() types.Type {
+ var methods []*types.Func
+
+ p.expectKeyword("interface")
+ p.expect('{')
+ for i := 0; p.tok != '}' && p.tok != scanner.EOF; i++ {
+ if i > 0 {
+ p.expect(';')
+ }
+ pkg, name := p.parseName(true)
+ sig := p.parseSignature(nil)
+ methods = append(methods, types.NewFunc(token.NoPos, pkg, name, sig))
+ }
+ p.expect('}')
+
+ // Complete requires the type's embedded interfaces to be fully defined,
+ // but we do not define any
+ return types.NewInterface(methods, nil).Complete()
+}
+
+// ChanType = ( "chan" [ "<-" ] | "<-" "chan" ) Type .
+//
+func (p *parser) parseChanType() types.Type {
+ dir := types.SendRecv
+ if p.tok == scanner.Ident {
+ p.expectKeyword("chan")
+ if p.tok == '<' {
+ p.expectSpecial("<-")
+ dir = types.SendOnly
+ }
+ } else {
+ p.expectSpecial("<-")
+ p.expectKeyword("chan")
+ dir = types.RecvOnly
+ }
+ elem := p.parseType()
+ return types.NewChan(dir, elem)
+}
+
+// Type =
+// BasicType | TypeName | ArrayType | SliceType | StructType |
+// PointerType | FuncType | InterfaceType | MapType | ChanType |
+// "(" Type ")" .
+//
+// BasicType = ident .
+// TypeName = ExportedName .
+// SliceType = "[" "]" Type .
+// PointerType = "*" Type .
+// FuncType = "func" Signature .
+//
+func (p *parser) parseType() types.Type {
+ switch p.tok {
+ case scanner.Ident:
+ switch p.lit {
+ default:
+ return p.parseBasicType()
+ case "struct":
+ return p.parseStructType()
+ case "func":
+ // FuncType
+ p.next()
+ return p.parseSignature(nil)
+ case "interface":
+ return p.parseInterfaceType()
+ case "map":
+ return p.parseMapType()
+ case "chan":
+ return p.parseChanType()
+ }
+ case '@':
+ // TypeName
+ pkg, name := p.parseExportedName()
+ return declTypeName(pkg, name).Type()
+ case '[':
+ p.next() // look ahead
+ if p.tok == ']' {
+ // SliceType
+ p.next()
+ return types.NewSlice(p.parseType())
+ }
+ return p.parseArrayType()
+ case '*':
+ // PointerType
+ p.next()
+ return types.NewPointer(p.parseType())
+ case '<':
+ return p.parseChanType()
+ case '(':
+ // "(" Type ")"
+ p.next()
+ typ := p.parseType()
+ p.expect(')')
+ return typ
+ }
+ p.errorf("expected type, got %s (%q)", scanner.TokenString(p.tok), p.lit)
+ return nil
+}
+
+// ----------------------------------------------------------------------------
+// Declarations
+
+// ImportDecl = "import" PackageName PackageId .
+//
+func (p *parser) parseImportDecl() {
+ p.expectKeyword("import")
+ name := p.parsePackageName()
+ p.getPkg(p.parsePackageId(), name)
+}
+
+// int_lit = [ "+" | "-" ] { "0" ... "9" } .
+//
+func (p *parser) parseInt() string {
+ s := ""
+ switch p.tok {
+ case '-':
+ s = "-"
+ p.next()
+ case '+':
+ p.next()
+ }
+ return s + p.expect(scanner.Int)
+}
+
+// number = int_lit [ "p" int_lit ] .
+//
+func (p *parser) parseNumber() (typ *types.Basic, val exact.Value) {
+ // mantissa
+ mant := exact.MakeFromLiteral(p.parseInt(), token.INT)
+ if mant == nil {
+ panic("invalid mantissa")
+ }
+
+ if p.lit == "p" {
+ // exponent (base 2)
+ p.next()
+ exp, err := strconv.ParseInt(p.parseInt(), 10, 0)
+ if err != nil {
+ p.error(err)
+ }
+ if exp < 0 {
+ denom := exact.MakeInt64(1)
+ denom = exact.Shift(denom, token.SHL, uint(-exp))
+ typ = types.Typ[types.UntypedFloat]
+ val = exact.BinaryOp(mant, token.QUO, denom)
+ return
+ }
+ if exp > 0 {
+ mant = exact.Shift(mant, token.SHL, uint(exp))
+ }
+ typ = types.Typ[types.UntypedFloat]
+ val = mant
+ return
+ }
+
+ typ = types.Typ[types.UntypedInt]
+ val = mant
+ return
+}
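+
+// Editor's note (illustrative, not part of the upstream source): with this
+// format "13p-2" denotes the untyped float constant 13/4 (mantissa 13, binary
+// exponent -2), while a plain "13" is returned as an untyped integer.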
+
+// ConstDecl = "const" ExportedName [ Type ] "=" Literal .
+// Literal = bool_lit | int_lit | float_lit | complex_lit | rune_lit | string_lit .
+// bool_lit = "true" | "false" .
+// complex_lit = "(" float_lit "+" float_lit "i" ")" .
+// rune_lit = "(" int_lit "+" int_lit ")" .
+// string_lit = `"` { unicode_char } `"` .
+//
+func (p *parser) parseConstDecl() {
+ p.expectKeyword("const")
+ pkg, name := p.parseExportedName()
+
+ var typ0 types.Type
+ if p.tok != '=' {
+ typ0 = p.parseType()
+ }
+
+ p.expect('=')
+ var typ types.Type
+ var val exact.Value
+ switch p.tok {
+ case scanner.Ident:
+ // bool_lit
+ if p.lit != "true" && p.lit != "false" {
+ p.error("expected true or false")
+ }
+ typ = types.Typ[types.UntypedBool]
+ val = exact.MakeBool(p.lit == "true")
+ p.next()
+
+ case '-', scanner.Int:
+ // int_lit
+ typ, val = p.parseNumber()
+
+ case '(':
+ // complex_lit or rune_lit
+ p.next()
+ if p.tok == scanner.Char {
+ p.next()
+ p.expect('+')
+ typ = types.Typ[types.UntypedRune]
+ _, val = p.parseNumber()
+ p.expect(')')
+ break
+ }
+ _, re := p.parseNumber()
+ p.expect('+')
+ _, im := p.parseNumber()
+ p.expectKeyword("i")
+ p.expect(')')
+ typ = types.Typ[types.UntypedComplex]
+ val = exact.BinaryOp(re, token.ADD, exact.MakeImag(im))
+
+ case scanner.Char:
+ // rune_lit
+ typ = types.Typ[types.UntypedRune]
+ val = exact.MakeFromLiteral(p.lit, token.CHAR)
+ p.next()
+
+ case scanner.String:
+ // string_lit
+ typ = types.Typ[types.UntypedString]
+ val = exact.MakeFromLiteral(p.lit, token.STRING)
+ p.next()
+
+ default:
+ p.errorf("expected literal got %s", scanner.TokenString(p.tok))
+ }
+
+ if typ0 == nil {
+ typ0 = typ
+ }
+
+ pkg.Scope().Insert(types.NewConst(token.NoPos, pkg, name, typ0, val))
+}
+
+// TypeDecl = "type" ExportedName Type .
+//
+func (p *parser) parseTypeDecl() {
+ p.expectKeyword("type")
+ pkg, name := p.parseExportedName()
+ obj := declTypeName(pkg, name)
+
+ // The type object may have been imported before and thus already
+ // have a type associated with it. We still need to parse the type
+ // structure, but throw it away if the object already has a type.
+ // This ensures that all imports refer to the same type object for
+ // a given type declaration.
+ typ := p.parseType()
+
+ if name := obj.Type().(*types.Named); name.Underlying() == nil {
+ name.SetUnderlying(typ)
+ }
+}
+
+// VarDecl = "var" ExportedName Type .
+//
+func (p *parser) parseVarDecl() {
+ p.expectKeyword("var")
+ pkg, name := p.parseExportedName()
+ typ := p.parseType()
+ pkg.Scope().Insert(types.NewVar(token.NoPos, pkg, name, typ))
+}
+
+// Func = Signature [ Body ] .
+// Body = "{" ... "}" .
+//
+func (p *parser) parseFunc(recv *types.Var) *types.Signature {
+ sig := p.parseSignature(recv)
+ if p.tok == '{' {
+ p.next()
+ for i := 1; i > 0; p.next() {
+ switch p.tok {
+ case '{':
+ i++
+ case '}':
+ i--
+ }
+ }
+ }
+ return sig
+}
+
+// MethodDecl = "func" Receiver Name Func .
+// Receiver = "(" ( identifier | "?" ) [ "*" ] ExportedName ")" .
+//
+func (p *parser) parseMethodDecl() {
+ // "func" already consumed
+ p.expect('(')
+ recv, _ := p.parseParameter() // receiver
+ p.expect(')')
+
+ // determine receiver base type object
+ base := deref(recv.Type()).(*types.Named)
+
+ // parse method name, signature, and possibly inlined body
+ _, name := p.parseName(true)
+ sig := p.parseFunc(recv)
+
+ // methods always belong to the same package as the base type object
+ pkg := base.Obj().Pkg()
+
+ // add method to type unless type was imported before
+ // and method exists already
+ // TODO(gri) This leads to a quadratic algorithm - ok for now because method counts are small.
+ base.AddMethod(types.NewFunc(token.NoPos, pkg, name, sig))
+}
+
+// FuncDecl = "func" ExportedName Func .
+//
+func (p *parser) parseFuncDecl() {
+ // "func" already consumed
+ pkg, name := p.parseExportedName()
+ typ := p.parseFunc(nil)
+ pkg.Scope().Insert(types.NewFunc(token.NoPos, pkg, name, typ))
+}
+
+// Decl = [ ImportDecl | ConstDecl | TypeDecl | VarDecl | FuncDecl | MethodDecl ] "\n" .
+//
+func (p *parser) parseDecl() {
+ if p.tok == scanner.Ident {
+ switch p.lit {
+ case "import":
+ p.parseImportDecl()
+ case "const":
+ p.parseConstDecl()
+ case "type":
+ p.parseTypeDecl()
+ case "var":
+ p.parseVarDecl()
+ case "func":
+ p.next() // look ahead
+ if p.tok == '(' {
+ p.parseMethodDecl()
+ } else {
+ p.parseFuncDecl()
+ }
+ }
+ }
+ p.expect('\n')
+}
+
+// ----------------------------------------------------------------------------
+// Export
+
+// Export = "PackageClause { Decl } "$$" .
+// PackageClause = "package" PackageName [ "safe" ] "\n" .
+//
+func (p *parser) parseExport() *types.Package {
+ p.expectKeyword("package")
+ name := p.parsePackageName()
+ if p.tok == scanner.Ident && p.lit == "safe" {
+ // package was compiled with -u option - ignore
+ p.next()
+ }
+ p.expect('\n')
+
+ pkg := p.getPkg(p.id, name)
+
+ for p.tok != '$' && p.tok != scanner.EOF {
+ p.parseDecl()
+ }
+
+ if ch := p.scanner.Peek(); p.tok != '$' || ch != '$' {
+ // don't call next()/expect() since reading past the
+ // export data may cause scanner errors (e.g. NUL chars)
+ p.errorf("expected '$$', got %s %c", scanner.TokenString(p.tok), ch)
+ }
+
+ if n := p.scanner.ErrorCount; n != 0 {
+ p.errorf("expected no scanner errors, got %d", n)
+ }
+
+ // package was imported completely and without errors
+ pkg.MarkComplete()
+
+ return pkg
+}
diff --git a/llgo/third_party/go.tools/go/gcimporter/gcimporter_test.go b/llgo/third_party/go.tools/go/gcimporter/gcimporter_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..7c775025c3c9f5a133a4d55bf0d38e0ac78a86be
--- /dev/null
+++ b/llgo/third_party/go.tools/go/gcimporter/gcimporter_test.go
@@ -0,0 +1,216 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gcimporter
+
+import (
+ "go/build"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "testing"
+ "time"
+
+ "llvm.org/llgo/third_party/go.tools/go/types"
+)
+
+var gcPath string // Go compiler path
+
+func init() {
+ // determine compiler
+ var gc string
+ switch runtime.GOARCH {
+ case "386":
+ gc = "8g"
+ case "amd64":
+ gc = "6g"
+ case "arm":
+ gc = "5g"
+ default:
+ gcPath = "unknown-GOARCH-compiler"
+ return
+ }
+ gcPath = filepath.Join(build.ToolDir, gc)
+}
+
+func compile(t *testing.T, dirname, filename string) string {
+ cmd := exec.Command(gcPath, filename)
+ cmd.Dir = dirname
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ t.Logf("%s", out)
+ t.Fatalf("%s %s failed: %s", gcPath, filename, err)
+ }
+ archCh, _ := build.ArchChar(runtime.GOARCH)
+ // filename should end with ".go"
+ return filepath.Join(dirname, filename[:len(filename)-2]+archCh)
+}
+
+// Use the same global imports map for all tests. The effect is
+// as if all tested packages were imported into a single package.
+var imports = make(map[string]*types.Package)
+
+func testPath(t *testing.T, path string) bool {
+ t0 := time.Now()
+ _, err := Import(imports, path)
+ if err != nil {
+ t.Errorf("testPath(%s): %s", path, err)
+ return false
+ }
+ t.Logf("testPath(%s): %v", path, time.Since(t0))
+ return true
+}
+
+const maxTime = 30 * time.Second
+
+func testDir(t *testing.T, dir string, endTime time.Time) (nimports int) {
+ dirname := filepath.Join(runtime.GOROOT(), "pkg", runtime.GOOS+"_"+runtime.GOARCH, dir)
+ list, err := ioutil.ReadDir(dirname)
+ if err != nil {
+ t.Fatalf("testDir(%s): %s", dirname, err)
+ }
+ for _, f := range list {
+ if time.Now().After(endTime) {
+ t.Log("testing time used up")
+ return
+ }
+ switch {
+ case !f.IsDir():
+ // try extensions
+ for _, ext := range pkgExts {
+ if strings.HasSuffix(f.Name(), ext) {
+ name := f.Name()[0 : len(f.Name())-len(ext)] // remove extension
+ if testPath(t, filepath.Join(dir, name)) {
+ nimports++
+ }
+ }
+ }
+ case f.IsDir():
+ nimports += testDir(t, filepath.Join(dir, f.Name()), endTime)
+ }
+ }
+ return
+}
+
+func TestImport(t *testing.T) {
+ // This package does not handle gccgo export data.
+ if runtime.Compiler == "gccgo" {
+ return
+ }
+
+ // On cross-compile builds, the path will not exist.
+ // Need to use GOHOSTOS, which is not available.
+ if _, err := os.Stat(gcPath); err != nil {
+ t.Skipf("skipping test: %v", err)
+ }
+
+ if outFn := compile(t, "testdata", "exports.go"); outFn != "" {
+ defer os.Remove(outFn)
+ }
+
+ nimports := 0
+ if testPath(t, "./testdata/exports") {
+ nimports++
+ }
+ nimports += testDir(t, "", time.Now().Add(maxTime)) // installed packages
+ t.Logf("tested %d imports", nimports)
+}
+
+var importedObjectTests = []struct {
+ name string
+ want string
+}{
+ {"unsafe.Pointer", "type Pointer unsafe.Pointer"},
+ {"math.Pi", "const Pi untyped float"},
+ {"io.Reader", "type Reader interface{Read(p []byte) (n int, err error)}"},
+ {"io.ReadWriter", "type ReadWriter interface{Read(p []byte) (n int, err error); Write(p []byte) (n int, err error)}"},
+ {"math.Sin", "func Sin(x float64) float64"},
+ // TODO(gri) add more tests
+}
+
+func TestImportedTypes(t *testing.T) {
+ // This package does not handle gccgo export data.
+ if runtime.Compiler == "gccgo" {
+ return
+ }
+ for _, test := range importedObjectTests {
+ s := strings.Split(test.name, ".")
+ if len(s) != 2 {
+ t.Fatal("inconsistent test data")
+ }
+ importPath := s[0]
+ objName := s[1]
+
+ pkg, err := Import(imports, importPath)
+ if err != nil {
+ t.Error(err)
+ continue
+ }
+
+ obj := pkg.Scope().Lookup(objName)
+ if obj == nil {
+ t.Errorf("%s: object not found", test.name)
+ continue
+ }
+
+ got := types.ObjectString(pkg, obj)
+ if got != test.want {
+ t.Errorf("%s: got %q; want %q", test.name, got, test.want)
+ }
+ }
+}
+
+func TestIssue5815(t *testing.T) {
+ // This package does not handle gccgo export data.
+ if runtime.Compiler == "gccgo" {
+ return
+ }
+
+ pkg, err := Import(make(map[string]*types.Package), "strings")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ scope := pkg.Scope()
+ for _, name := range scope.Names() {
+ obj := scope.Lookup(name)
+ if obj.Pkg() == nil {
+ t.Errorf("no pkg for %s", obj)
+ }
+ if tname, _ := obj.(*types.TypeName); tname != nil {
+ named := tname.Type().(*types.Named)
+ for i := 0; i < named.NumMethods(); i++ {
+ m := named.Method(i)
+ if m.Pkg() == nil {
+ t.Errorf("no pkg for %s", m)
+ }
+ }
+ }
+ }
+}
+
+// Smoke test to ensure that imported methods get the correct package.
+func TestCorrectMethodPackage(t *testing.T) {
+ // This package does not handle gccgo export data.
+ if runtime.Compiler == "gccgo" {
+ return
+ }
+
+ imports := make(map[string]*types.Package)
+ _, err := Import(imports, "net/http")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ mutex := imports["sync"].Scope().Lookup("Mutex").(*types.TypeName).Type()
+ mset := types.NewMethodSet(types.NewPointer(mutex)) // methods of *sync.Mutex
+ sel := mset.Lookup(nil, "Lock")
+ lock := sel.Obj().(*types.Func)
+ if got, want := lock.Pkg().Path(), "sync"; got != want {
+ t.Errorf("got package path %q; want %q", got, want)
+ }
+}
diff --git a/llgo/third_party/go.tools/go/gcimporter/testdata/exports.go b/llgo/third_party/go.tools/go/gcimporter/testdata/exports.go
new file mode 100644
index 0000000000000000000000000000000000000000..8ee28b0942b69cab01baef4e6203a6ce12c63c26
--- /dev/null
+++ b/llgo/third_party/go.tools/go/gcimporter/testdata/exports.go
@@ -0,0 +1,89 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file is used to generate an object file which
+// serves as test file for gcimporter_test.go.
+
+package exports
+
+import (
+ "go/ast"
+)
+
+// Issue 3682: Correctly read dotted identifiers from export data.
+const init1 = 0
+
+func init() {}
+
+const (
+ C0 int = 0
+ C1 = 3.14159265
+ C2 = 2.718281828i
+ C3 = -123.456e-789
+ C4 = +123.456E+789
+ C5 = 1234i
+ C6 = "foo\n"
+ C7 = `bar\n`
+)
+
+type (
+ T1 int
+ T2 [10]int
+ T3 []int
+ T4 *int
+ T5 chan int
+ T6a chan<- int
+ T6b chan (<-chan int)
+ T6c chan<- (chan int)
+ T7 <-chan *ast.File
+ T8 struct{}
+ T9 struct {
+ a int
+ b, c float32
+ d []string `go:"tag"`
+ }
+ T10 struct {
+ T8
+ T9
+ _ *T10
+ }
+ T11 map[int]string
+ T12 interface{}
+ T13 interface {
+ m1()
+ m2(int) float32
+ }
+ T14 interface {
+ T12
+ T13
+ m3(x ...struct{}) []T9
+ }
+ T15 func()
+ T16 func(int)
+ T17 func(x int)
+ T18 func() float32
+ T19 func() (x float32)
+ T20 func(...interface{})
+ T21 struct{ next *T21 }
+ T22 struct{ link *T23 }
+ T23 struct{ link *T22 }
+ T24 *T24
+ T25 *T26
+ T26 *T27
+ T27 *T25
+ T28 func(T28) T28
+)
+
+var (
+ V0 int
+ V1 = -991.0
+)
+
+func F1() {}
+func F2(x int) {}
+func F3() int { return 0 }
+func F4() float32 { return 0 }
+func F5(a, b, c int, u, v, w struct{ x, y T1 }, more ...interface{}) (p, q, r chan<- T10)
+
+func (p *T1) M1()
diff --git a/llgo/third_party/go.tools/go/importer/export.go b/llgo/third_party/go.tools/go/importer/export.go
new file mode 100644
index 0000000000000000000000000000000000000000..aae4480212e921d0363775f34e99a51fa78b490b
--- /dev/null
+++ b/llgo/third_party/go.tools/go/importer/export.go
@@ -0,0 +1,462 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package importer
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "go/ast"
+ "strings"
+
+ "llvm.org/llgo/third_party/go.tools/go/exact"
+ "llvm.org/llgo/third_party/go.tools/go/types"
+)
+
+// debugging support
+const (
+ debug = false // emit debugging data
+ trace = false // print emitted data
+)
+
+// format returns a byte indicating the low-level encoding/decoding format
+// (debug vs product).
+func format() byte {
+ if debug {
+ return 'd'
+ }
+ return 'p'
+}
+
+// ExportData serializes the interface (exported package objects)
+// of package pkg and returns the corresponding data. The export
+// format is described elsewhere (TODO).
+func ExportData(pkg *types.Package) []byte {
+ p := exporter{
+ data: append([]byte(magic), format()),
+ pkgIndex: make(map[*types.Package]int),
+ typIndex: make(map[types.Type]int),
+ }
+
+ // populate typIndex with predeclared types
+ for _, t := range predeclared {
+ p.typIndex[t] = len(p.typIndex)
+ }
+
+ if trace {
+ p.tracef("export %s\n", pkg.Name())
+ defer p.tracef("\n")
+ }
+
+ p.string(version)
+
+ p.pkg(pkg)
+
+ // collect exported objects from package scope
+ var list []types.Object
+ scope := pkg.Scope()
+ for _, name := range scope.Names() {
+ if exported(name) {
+ list = append(list, scope.Lookup(name))
+ }
+ }
+
+ // write objects
+ p.int(len(list))
+ for _, obj := range list {
+ p.obj(obj)
+ }
+
+ return p.data
+}
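+
+// exampleExportRoundTrip is an editor's illustrative sketch, not part of the
+// upstream file: ExportData and ImportData (defined in import.go) are meant
+// to round-trip a package's exported interface through the byte format above.
+func exampleExportRoundTrip(pkg *types.Package) error {
+ data := ExportData(pkg)
+ imports := make(map[string]*types.Package)
+ n, pkg2, err := ImportData(imports, data)
+ if err != nil {
+ return err
+ }
+ if n != len(data) {
+ return fmt.Errorf("only %d of %d bytes consumed", n, len(data))
+ }
+ _ = pkg2 // pkg2 describes the same exported interface as pkg
+ return nil
+}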
+
+type exporter struct {
+ data []byte
+ pkgIndex map[*types.Package]int
+ typIndex map[types.Type]int
+
+ // tracing support
+ indent string
+}
+
+func (p *exporter) pkg(pkg *types.Package) {
+ if trace {
+ p.tracef("package { ")
+ defer p.tracef("} ")
+ }
+
+ if pkg == nil {
+ panic("unexpected nil pkg")
+ }
+
+ // if the package was seen before, write its index (>= 0)
+ if i, ok := p.pkgIndex[pkg]; ok {
+ p.int(i)
+ return
+ }
+ p.pkgIndex[pkg] = len(p.pkgIndex)
+
+ // otherwise, write the package tag (< 0) and package data
+ p.int(packageTag)
+ p.string(pkg.Name())
+ p.string(pkg.Path())
+}
+
+func (p *exporter) obj(obj types.Object) {
+ if trace {
+ p.tracef("object %s {\n", obj.Name())
+ defer p.tracef("}\n")
+ }
+
+ switch obj := obj.(type) {
+ case *types.Const:
+ p.int(constTag)
+ p.string(obj.Name())
+ p.typ(obj.Type())
+ p.value(obj.Val())
+ case *types.TypeName:
+ p.int(typeTag)
+ // name is written by corresponding named type
+ p.typ(obj.Type().(*types.Named))
+ case *types.Var:
+ p.int(varTag)
+ p.string(obj.Name())
+ p.typ(obj.Type())
+ case *types.Func:
+ p.int(funcTag)
+ p.string(obj.Name())
+ p.typ(obj.Type())
+ default:
+ panic(fmt.Sprintf("unexpected object type %T", obj))
+ }
+}
+
+func (p *exporter) value(x exact.Value) {
+ if trace {
+ p.tracef("value { ")
+ defer p.tracef("} ")
+ }
+
+ switch kind := x.Kind(); kind {
+ case exact.Bool:
+ tag := falseTag
+ if exact.BoolVal(x) {
+ tag = trueTag
+ }
+ p.int(tag)
+ case exact.Int:
+ if i, ok := exact.Int64Val(x); ok {
+ p.int(int64Tag)
+ p.int64(i)
+ return
+ }
+ p.int(floatTag)
+ p.float(x)
+ case exact.Float:
+ p.int(fractionTag)
+ p.fraction(x)
+ case exact.Complex:
+ p.int(complexTag)
+ p.fraction(exact.Real(x))
+ p.fraction(exact.Imag(x))
+ case exact.String:
+ p.int(stringTag)
+ p.string(exact.StringVal(x))
+ default:
+ panic(fmt.Sprintf("unexpected value kind %d", kind))
+ }
+}
+
+func (p *exporter) float(x exact.Value) {
+ sign := exact.Sign(x)
+ p.int(sign)
+ if sign == 0 {
+ return
+ }
+
+ p.ufloat(x)
+}
+
+func (p *exporter) fraction(x exact.Value) {
+ sign := exact.Sign(x)
+ p.int(sign)
+ if sign == 0 {
+ return
+ }
+
+ p.ufloat(exact.Num(x))
+ p.ufloat(exact.Denom(x))
+}
+
+// ufloat writes abs(x) in form of a binary exponent
+// followed by its mantissa bytes; x must be != 0.
+func (p *exporter) ufloat(x exact.Value) {
+ mant := exact.Bytes(x)
+ exp8 := -1
+ for i, b := range mant {
+ if b != 0 {
+ exp8 = i
+ break
+ }
+ }
+ if exp8 < 0 {
+ panic(fmt.Sprintf("%s has no mantissa", x))
+ }
+ p.int(exp8 * 8)
+ p.bytes(mant[exp8:])
+}
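+
+// Editor's note (illustrative, not part of the upstream source): for x = 65536
+// exact.Bytes yields the little-endian mantissa [0x00 0x00 0x01]; the two
+// leading zero bytes are folded into the exponent (2*8 = 16) and only [0x01]
+// is written, so the importer reconstructs the value as 1 << 16.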
+
+func (p *exporter) typ(typ types.Type) {
+ if trace {
+ p.tracef("type {\n")
+ defer p.tracef("}\n")
+ }
+
+ // if the type was seen before, write its index (>= 0)
+ if i, ok := p.typIndex[typ]; ok {
+ p.int(i)
+ return
+ }
+ p.typIndex[typ] = len(p.typIndex)
+
+ // otherwise, write the type tag (< 0) and type data
+ switch t := typ.(type) {
+ case *types.Array:
+ p.int(arrayTag)
+ p.int64(t.Len())
+ p.typ(t.Elem())
+
+ case *types.Slice:
+ p.int(sliceTag)
+ p.typ(t.Elem())
+
+ case *types.Struct:
+ p.int(structTag)
+ n := t.NumFields()
+ p.int(n)
+ for i := 0; i < n; i++ {
+ p.field(t.Field(i))
+ p.string(t.Tag(i))
+ }
+
+ case *types.Pointer:
+ p.int(pointerTag)
+ p.typ(t.Elem())
+
+ case *types.Signature:
+ p.int(signatureTag)
+ p.signature(t)
+
+ case *types.Interface:
+ p.int(interfaceTag)
+
+ // write embedded interfaces
+ m := t.NumEmbeddeds()
+ p.int(m)
+ for i := 0; i < m; i++ {
+ p.typ(t.Embedded(i))
+ }
+
+ // write methods
+ n := t.NumExplicitMethods()
+ p.int(n)
+ for i := 0; i < n; i++ {
+ m := t.ExplicitMethod(i)
+ p.qualifiedName(m.Pkg(), m.Name())
+ p.typ(m.Type())
+ }
+
+ case *types.Map:
+ p.int(mapTag)
+ p.typ(t.Key())
+ p.typ(t.Elem())
+
+ case *types.Chan:
+ p.int(chanTag)
+ p.int(int(t.Dir()))
+ p.typ(t.Elem())
+
+ case *types.Named:
+ p.int(namedTag)
+
+ // write type object
+ obj := t.Obj()
+ p.string(obj.Name())
+ p.pkg(obj.Pkg())
+
+ // write underlying type
+ p.typ(t.Underlying())
+
+ // write associated methods
+ n := t.NumMethods()
+ p.int(n)
+ for i := 0; i < n; i++ {
+ m := t.Method(i)
+ p.string(m.Name())
+ p.typ(m.Type())
+ }
+
+ default:
+ panic("unreachable")
+ }
+}
+
+func (p *exporter) field(f *types.Var) {
+ // anonymous fields have "" name
+ name := ""
+ if !f.Anonymous() {
+ name = f.Name()
+ }
+
+ // qualifiedName will always emit the field package for
+ // anonymous fields because "" is not an exported name.
+ p.qualifiedName(f.Pkg(), name)
+ p.typ(f.Type())
+}
+
+func (p *exporter) qualifiedName(pkg *types.Package, name string) {
+ p.string(name)
+ // exported names don't need package
+ if !exported(name) {
+ if pkg == nil {
+ panic(fmt.Sprintf("nil package for unexported qualified name %s", name))
+ }
+ p.pkg(pkg)
+ }
+}
+
+func (p *exporter) signature(sig *types.Signature) {
+ // We need the receiver information (T vs *T)
+ // for methods associated with named types.
+ // We do not record interface receiver types in the
+ // export data because 1) the importer can derive them
+ // from the interface type and 2) they create cycles
+ // in the type graph.
+ if recv := sig.Recv(); recv != nil {
+ if _, ok := recv.Type().Underlying().(*types.Interface); !ok {
+ // 1-element tuple
+ p.int(1)
+ p.param(recv)
+ } else {
+ // 0-element tuple
+ p.int(0)
+ }
+ } else {
+ // 0-element tuple
+ p.int(0)
+ }
+ p.tuple(sig.Params())
+ p.tuple(sig.Results())
+ if sig.Variadic() {
+ p.int(1)
+ } else {
+ p.int(0)
+ }
+}
+
+func (p *exporter) param(v *types.Var) {
+ p.string(v.Name())
+ p.typ(v.Type())
+}
+
+func (p *exporter) tuple(t *types.Tuple) {
+ n := t.Len()
+ p.int(n)
+ for i := 0; i < n; i++ {
+ p.param(t.At(i))
+ }
+}
+
+// ----------------------------------------------------------------------------
+// encoders
+
+func (p *exporter) string(s string) {
+ p.bytes([]byte(s)) // (could be inlined if extra allocation matters)
+}
+
+func (p *exporter) int(x int) {
+ p.int64(int64(x))
+}
+
+func (p *exporter) int64(x int64) {
+ if debug {
+ p.marker('i')
+ }
+
+ if trace {
+ p.tracef("%d ", x)
+ }
+
+ p.rawInt64(x)
+}
+
+func (p *exporter) bytes(b []byte) {
+ if debug {
+ p.marker('b')
+ }
+
+ if trace {
+ p.tracef("%q ", b)
+ }
+
+ p.rawInt64(int64(len(b)))
+ if len(b) > 0 {
+ p.data = append(p.data, b...)
+ }
+}
+
+// marker emits a marker byte and position information which makes
+// it easy for a reader to detect if it is "out of sync". Used for
+// debug format only.
+func (p *exporter) marker(m byte) {
+ if debug {
+ p.data = append(p.data, m)
+ p.rawInt64(int64(len(p.data)))
+ }
+}
+
+// rawInt64 should only be used by low-level encoders
+func (p *exporter) rawInt64(x int64) {
+ var tmp [binary.MaxVarintLen64]byte
+ n := binary.PutVarint(tmp[:], x)
+ p.data = append(p.data, tmp[:n]...)
+}
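+
+// Editor's note (illustrative, not part of the upstream source): PutVarint
+// zig-zag encodes the value, so small magnitudes stay short on the wire:
+// 1 encodes as the single byte 0x02 and -1 as 0x01.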
+
+// utility functions
+
+func (p *exporter) tracef(format string, args ...interface{}) {
+ // rewrite format string to take care of indentation
+ const indent = ". "
+ if strings.IndexAny(format, "{}\n") >= 0 {
+ var buf bytes.Buffer
+ for i := 0; i < len(format); i++ {
+ // no need to deal with runes
+ ch := format[i]
+ switch ch {
+ case '{':
+ p.indent += indent
+ case '}':
+ p.indent = p.indent[:len(p.indent)-len(indent)]
+ if i+1 < len(format) && format[i+1] == '\n' {
+ buf.WriteByte('\n')
+ buf.WriteString(p.indent)
+ buf.WriteString("} ")
+ i++
+ continue
+ }
+ }
+ buf.WriteByte(ch)
+ if ch == '\n' {
+ buf.WriteString(p.indent)
+ }
+ }
+ format = buf.String()
+ }
+ fmt.Printf(format, args...)
+}
+
+func exported(name string) bool {
+ return ast.IsExported(name)
+}
diff --git a/llgo/third_party/go.tools/go/importer/import.go b/llgo/third_party/go.tools/go/importer/import.go
new file mode 100644
index 0000000000000000000000000000000000000000..0372c9c59a04800538ebf9c487fdc2c53a227449
--- /dev/null
+++ b/llgo/third_party/go.tools/go/importer/import.go
@@ -0,0 +1,456 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This implementation is loosely based on the algorithm described
+// in: "On the linearization of graphs and writing symbol files",
+// by R. Griesemer, Technical Report 156, ETH Zürich, 1991.
+
+// Package importer implements an exporter and importer for Go export data.
+package importer
+
+import (
+ "encoding/binary"
+ "fmt"
+ "go/token"
+
+ "llvm.org/llgo/third_party/go.tools/go/exact"
+ "llvm.org/llgo/third_party/go.tools/go/types"
+)
+
+// ImportData imports a package from the serialized package data
+// and returns the number of bytes consumed and a reference to the package.
+// If data is obviously malformed, an error is returned but in
+// general it is not recommended to call ImportData on untrusted
+// data.
+func ImportData(imports map[string]*types.Package, data []byte) (int, *types.Package, error) {
+ datalen := len(data)
+
+ // check magic string
+ var s string
+ if len(data) >= len(magic) {
+ s = string(data[:len(magic)])
+ data = data[len(magic):]
+ }
+ if s != magic {
+ return 0, nil, fmt.Errorf("incorrect magic string: got %q; want %q", s, magic)
+ }
+
+ // check low-level encoding format
+ var m byte = 'm' // missing format
+ if len(data) > 0 {
+ m = data[0]
+ data = data[1:]
+ }
+ if m != format() {
+ return 0, nil, fmt.Errorf("incorrect low-level encoding format: got %c; want %c", m, format())
+ }
+
+ p := importer{
+ data: data,
+ datalen: datalen,
+ imports: imports,
+ }
+
+ // populate typList with predeclared types
+ for _, t := range predeclared {
+ p.typList = append(p.typList, t)
+ }
+
+ if v := p.string(); v != version {
+ return 0, nil, fmt.Errorf("unknown version: got %s; want %s", v, version)
+ }
+
+ pkg := p.pkg()
+ if debug && p.pkgList[0] != pkg {
+ panic("imported packaged not found in pkgList[0]")
+ }
+
+ // read objects
+ n := p.int()
+ for i := 0; i < n; i++ {
+ p.obj(pkg)
+ }
+
+ // complete interfaces
+ for _, typ := range p.typList {
+ if it, ok := typ.(*types.Interface); ok {
+ it.Complete()
+ }
+ }
+
+ // package was imported completely and without errors
+ pkg.MarkComplete()
+
+ return p.consumed(), pkg, nil
+}
+
+type importer struct {
+ data []byte
+ datalen int
+ imports map[string]*types.Package
+ pkgList []*types.Package
+ typList []types.Type
+}
+
+func (p *importer) pkg() *types.Package {
+ // if the package was seen before, i is its index (>= 0)
+ i := p.int()
+ if i >= 0 {
+ return p.pkgList[i]
+ }
+
+ // otherwise, i is the package tag (< 0)
+ if i != packageTag {
+ panic(fmt.Sprintf("unexpected package tag %d", i))
+ }
+
+ // read package data
+ name := p.string()
+ path := p.string()
+
+ // if the package was imported before, use that one; otherwise create a new one
+ pkg := p.imports[path]
+ if pkg == nil {
+ pkg = types.NewPackage(path, name)
+ p.imports[path] = pkg
+ }
+ p.pkgList = append(p.pkgList, pkg)
+
+ return pkg
+}
+
+func (p *importer) obj(pkg *types.Package) {
+ var obj types.Object
+ switch tag := p.int(); tag {
+ case constTag:
+ obj = types.NewConst(token.NoPos, pkg, p.string(), p.typ(), p.value())
+ case typeTag:
+ // type object is added to scope via respective named type
+ _ = p.typ().(*types.Named)
+ return
+ case varTag:
+ obj = types.NewVar(token.NoPos, pkg, p.string(), p.typ())
+ case funcTag:
+ obj = types.NewFunc(token.NoPos, pkg, p.string(), p.typ().(*types.Signature))
+ default:
+ panic(fmt.Sprintf("unexpected object tag %d", tag))
+ }
+
+ if alt := pkg.Scope().Insert(obj); alt != nil {
+ panic(fmt.Sprintf("%s already declared", alt.Name()))
+ }
+}
+
+func (p *importer) value() exact.Value {
+ switch kind := exact.Kind(p.int()); kind {
+ case falseTag:
+ return exact.MakeBool(false)
+ case trueTag:
+ return exact.MakeBool(true)
+ case int64Tag:
+ return exact.MakeInt64(p.int64())
+ case floatTag:
+ return p.float()
+ case fractionTag:
+ return p.fraction()
+ case complexTag:
+ re := p.fraction()
+ im := p.fraction()
+ return exact.BinaryOp(re, token.ADD, exact.MakeImag(im))
+ case stringTag:
+ return exact.MakeString(p.string())
+ default:
+ panic(fmt.Sprintf("unexpected value kind %d", kind))
+ }
+}
+
+func (p *importer) float() exact.Value {
+ sign := p.int()
+ if sign == 0 {
+ return exact.MakeInt64(0)
+ }
+
+ x := p.ufloat()
+ if sign < 0 {
+ x = exact.UnaryOp(token.SUB, x, 0)
+ }
+ return x
+}
+
+func (p *importer) fraction() exact.Value {
+ sign := p.int()
+ if sign == 0 {
+ return exact.MakeInt64(0)
+ }
+
+ x := exact.BinaryOp(p.ufloat(), token.QUO, p.ufloat())
+ if sign < 0 {
+ x = exact.UnaryOp(token.SUB, x, 0)
+ }
+ return x
+}
+
+func (p *importer) ufloat() exact.Value {
+ exp := p.int()
+ x := exact.MakeFromBytes(p.bytes())
+ switch {
+ case exp < 0:
+ d := exact.Shift(exact.MakeInt64(1), token.SHL, uint(-exp))
+ x = exact.BinaryOp(x, token.QUO, d)
+ case exp > 0:
+ x = exact.Shift(x, token.SHL, uint(exp))
+ }
+ return x
+}
+
+func (p *importer) record(t types.Type) {
+ p.typList = append(p.typList, t)
+}
+
+func (p *importer) typ() types.Type {
+ // if the type was seen before, i is its index (>= 0)
+ i := p.int()
+ if i >= 0 {
+ return p.typList[i]
+ }
+
+ // otherwise, i is the type tag (< 0)
+ switch i {
+ case arrayTag:
+ t := new(types.Array)
+ p.record(t)
+
+ n := p.int64()
+ *t = *types.NewArray(p.typ(), n)
+ return t
+
+ case sliceTag:
+ t := new(types.Slice)
+ p.record(t)
+
+ *t = *types.NewSlice(p.typ())
+ return t
+
+ case structTag:
+ t := new(types.Struct)
+ p.record(t)
+
+ n := p.int()
+ fields := make([]*types.Var, n)
+ tags := make([]string, n)
+ for i := range fields {
+ fields[i] = p.field()
+ tags[i] = p.string()
+ }
+ *t = *types.NewStruct(fields, tags)
+ return t
+
+ case pointerTag:
+ t := new(types.Pointer)
+ p.record(t)
+
+ *t = *types.NewPointer(p.typ())
+ return t
+
+ case signatureTag:
+ t := new(types.Signature)
+ p.record(t)
+
+ *t = *p.signature()
+ return t
+
+ case interfaceTag:
+ // Create a dummy entry in the type list. This is safe because we
+ // cannot expect the interface type to appear in a cycle, as any
+ // such cycle must contain a named type which would have been
+ // defined earlier.
+ n := len(p.typList)
+ p.record(nil)
+
+ // read embedded interfaces
+ embeddeds := make([]*types.Named, p.int())
+ for i := range embeddeds {
+ embeddeds[i] = p.typ().(*types.Named)
+ }
+
+ // read methods
+ methods := make([]*types.Func, p.int())
+ for i := range methods {
+ pkg, name := p.qualifiedName()
+ methods[i] = types.NewFunc(token.NoPos, pkg, name, p.typ().(*types.Signature))
+ }
+
+ t := types.NewInterface(methods, embeddeds)
+ p.typList[n] = t
+ return t
+
+ case mapTag:
+ t := new(types.Map)
+ p.record(t)
+
+ *t = *types.NewMap(p.typ(), p.typ())
+ return t
+
+ case chanTag:
+ t := new(types.Chan)
+ p.record(t)
+
+ *t = *types.NewChan(types.ChanDir(p.int()), p.typ())
+ return t
+
+ case namedTag:
+ // read type object
+ name := p.string()
+ pkg := p.pkg()
+ scope := pkg.Scope()
+ obj := scope.Lookup(name)
+
+ // if the object doesn't exist yet, create and insert it
+ if obj == nil {
+ obj = types.NewTypeName(token.NoPos, pkg, name, nil)
+ scope.Insert(obj)
+ }
+
+ // associate new named type with obj if it doesn't exist yet
+ t0 := types.NewNamed(obj.(*types.TypeName), nil, nil)
+
+ // but record the existing type, if any
+ t := obj.Type().(*types.Named)
+ p.record(t)
+
+ // read underlying type
+ t0.SetUnderlying(p.typ())
+
+ // read associated methods
+ for i, n := 0, p.int(); i < n; i++ {
+ t0.AddMethod(types.NewFunc(token.NoPos, pkg, p.string(), p.typ().(*types.Signature)))
+ }
+
+ return t
+
+ default:
+ panic(fmt.Sprintf("unexpected type tag %d", i))
+ }
+}
+
+func deref(typ types.Type) types.Type {
+ if p, _ := typ.(*types.Pointer); p != nil {
+ return p.Elem()
+ }
+ return typ
+}
+
+func (p *importer) field() *types.Var {
+ pkg, name := p.qualifiedName()
+ typ := p.typ()
+
+ anonymous := false
+ if name == "" {
+ // anonymous field - typ must be T or *T and T must be a type name
+ switch typ := deref(typ).(type) {
+ case *types.Basic: // basic types are named types
+ pkg = nil
+ name = typ.Name()
+ case *types.Named:
+ obj := typ.Obj()
+ name = obj.Name()
+ // correct the field package for anonymous fields
+ if exported(name) {
+ pkg = p.pkgList[0]
+ }
+ default:
+ panic("anonymous field expected")
+ }
+ anonymous = true
+ }
+
+ return types.NewField(token.NoPos, pkg, name, typ, anonymous)
+}
+
+func (p *importer) qualifiedName() (*types.Package, string) {
+ name := p.string()
+ pkg := p.pkgList[0] // exported names assume current package
+ if !exported(name) {
+ pkg = p.pkg()
+ }
+ return pkg, name
+}
+
+func (p *importer) signature() *types.Signature {
+ var recv *types.Var
+ if p.int() != 0 {
+ recv = p.param()
+ }
+ return types.NewSignature(nil, recv, p.tuple(), p.tuple(), p.int() != 0)
+}
+
+func (p *importer) param() *types.Var {
+ return types.NewVar(token.NoPos, nil, p.string(), p.typ())
+}
+
+func (p *importer) tuple() *types.Tuple {
+ vars := make([]*types.Var, p.int())
+ for i := range vars {
+ vars[i] = p.param()
+ }
+ return types.NewTuple(vars...)
+}
+
+// ----------------------------------------------------------------------------
+// decoders
+
+func (p *importer) string() string {
+ return string(p.bytes())
+}
+
+func (p *importer) int() int {
+ return int(p.int64())
+}
+
+func (p *importer) int64() int64 {
+ if debug {
+ p.marker('i')
+ }
+
+ return p.rawInt64()
+}
+
+// Note: bytes() returns the respective byte slice w/o copy.
+func (p *importer) bytes() []byte {
+ if debug {
+ p.marker('b')
+ }
+
+ var b []byte
+ if n := int(p.rawInt64()); n > 0 {
+ b = p.data[:n]
+ p.data = p.data[n:]
+ }
+ return b
+}
+
+func (p *importer) marker(want byte) {
+ if debug {
+ if got := p.data[0]; got != want {
+ panic(fmt.Sprintf("incorrect marker: got %c; want %c (pos = %d)", got, want, p.consumed()))
+ }
+ p.data = p.data[1:]
+
+ pos := p.consumed()
+ if n := int(p.rawInt64()); n != pos {
+ panic(fmt.Sprintf("incorrect position: got %d; want %d", n, pos))
+ }
+ }
+}
+
+// rawInt64 should only be used by low-level decoders
+func (p *importer) rawInt64() int64 {
+ i, n := binary.Varint(p.data)
+ p.data = p.data[n:]
+ return i
+}
+
+func (p *importer) consumed() int {
+ return p.datalen - len(p.data)
+}
diff --git a/llgo/third_party/go.tools/go/importer/import_test.go b/llgo/third_party/go.tools/go/importer/import_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..66ba07e37901c21d5a8038866bec098ea8e1d478
--- /dev/null
+++ b/llgo/third_party/go.tools/go/importer/import_test.go
@@ -0,0 +1,382 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package importer
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "go/ast"
+ "go/build"
+ "go/parser"
+ "go/token"
+ "os"
+ "path/filepath"
+ "runtime"
+ "sort"
+ "strconv"
+ "testing"
+ "time"
+
+ "llvm.org/llgo/third_party/go.tools/go/gcimporter"
+ "llvm.org/llgo/third_party/go.tools/go/types"
+)
+
+var fset = token.NewFileSet()
+
+var tests = []string{
+ `package p`,
+
+ // consts
+ `package p; const X = true`,
+ `package p; const X, y, Z = true, false, 0 != 0`,
+ `package p; const ( A float32 = 1<= 750*time.Millisecond {
+ return
+ }
+
+ pkg, err := pkgForPath(lib)
+ switch err := err.(type) {
+ case nil:
+ // ok
+ case *build.NoGoError:
+ // no Go files - ignore
+ continue
+ default:
+ t.Errorf("typecheck failed: %s", err)
+ continue
+ }
+
+ size, gcsize := testExportImport(t, pkg, lib)
+ if gcsize == 0 {
+ // if gc import didn't happen, assume same size
+ // (and avoid division by zero below)
+ gcsize = size
+ }
+
+ if testing.Verbose() {
+ fmt.Printf("%s\t%d\t%d\t%d%%\n", lib, size, gcsize, int(float64(size)*100/float64(gcsize)))
+ }
+ totSize += size
+ totGcSize += gcsize
+ }
+
+ if testing.Verbose() {
+ fmt.Printf("\n%d\t%d\t%d%%\n", totSize, totGcSize, int(float64(totSize)*100/float64(totGcSize)))
+ }
+
+ types.GcCompatibilityMode = false
+}
+
+func testExportImport(t *testing.T, pkg0 *types.Package, path string) (size, gcsize int) {
+ data := ExportData(pkg0)
+ size = len(data)
+
+ imports := make(map[string]*types.Package)
+ n, pkg1, err := ImportData(imports, data)
+ if err != nil {
+ t.Errorf("package %s: import failed: %s", pkg0.Name(), err)
+ return
+ }
+ if n != size {
+ t.Errorf("package %s: not all input data consumed", pkg0.Name())
+ return
+ }
+
+ s0 := pkgString(pkg0)
+ s1 := pkgString(pkg1)
+ if s1 != s0 {
+ t.Errorf("package %s: \nimport got:\n%s\nwant:\n%s\n", pkg0.Name(), s1, s0)
+ }
+
+ // If we have a standard library, compare also against the gcimported package.
+ if path == "" {
+ return // not std library
+ }
+
+ gcdata, err := gcExportData(path)
+ if err != nil {
+ if pkg0.Name() == "main" {
+ return // no export data present for main package
+ }
+ t.Errorf("package %s: couldn't get export data: %s", pkg0.Name(), err)
+ }
+ gcsize = len(gcdata)
+
+ imports = make(map[string]*types.Package)
+ pkg2, err := gcImportData(imports, gcdata, path)
+ if err != nil {
+ t.Errorf("package %s: gcimport failed: %s", pkg0.Name(), err)
+ return
+ }
+
+ s2 := pkgString(pkg2)
+ if s2 != s0 {
+ t.Errorf("package %s: \ngcimport got:\n%s\nwant:\n%s\n", pkg0.Name(), s2, s0)
+ }
+
+ return
+}
+
+func pkgForSource(src string) (*types.Package, error) {
+ f, err := parser.ParseFile(fset, "", src, 0)
+ if err != nil {
+ return nil, err
+ }
+ return typecheck("import-test", f)
+}
+
+func pkgForPath(path string) (*types.Package, error) {
+ // collect filenames
+ ctxt := build.Default
+ pkginfo, err := ctxt.Import(path, "", 0)
+ if err != nil {
+ return nil, err
+ }
+ filenames := append(pkginfo.GoFiles, pkginfo.CgoFiles...)
+
+ // parse files
+ files := make([]*ast.File, len(filenames))
+ for i, filename := range filenames {
+ var err error
+ files[i], err = parser.ParseFile(fset, filepath.Join(pkginfo.Dir, filename), nil, 0)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return typecheck(path, files...)
+}
+
+var defaultConf = types.Config{
+ // we only care about exports and thus can ignore function bodies
+ IgnoreFuncBodies: true,
+ // work around C imports if possible
+ FakeImportC: true,
+ // strconv exports IntSize as a constant. The type-checker must
+ // use the same word size; otherwise the results of the type-checker
+ // and gc imports differ. We don't care about alignment
+ // since none of the tests have exported constants depending
+ // on alignment (see also issue 8366).
+ Sizes: &types.StdSizes{WordSize: strconv.IntSize / 8, MaxAlign: 8},
+}
+
+func typecheck(path string, files ...*ast.File) (*types.Package, error) {
+ return defaultConf.Check(path, fset, files, nil)
+}
+
+// pkgString returns a string representation of a package's exported interface.
+func pkgString(pkg *types.Package) string {
+ var buf bytes.Buffer
+
+ fmt.Fprintf(&buf, "package %s\n", pkg.Name())
+
+ scope := pkg.Scope()
+ for _, name := range scope.Names() {
+ if exported(name) {
+ obj := scope.Lookup(name)
+ buf.WriteString(obj.String())
+
+ switch obj := obj.(type) {
+ case *types.Const:
+ // For now only print constant values if they are not float
+ // or complex. This permits comparing go/types results with
+ // gc-generated gcimported package interfaces.
+ info := obj.Type().Underlying().(*types.Basic).Info()
+ if info&types.IsFloat == 0 && info&types.IsComplex == 0 {
+ fmt.Fprintf(&buf, " = %s", obj.Val())
+ }
+
+ case *types.TypeName:
+ // Print associated methods.
+ // Basic types (e.g., unsafe.Pointer) have *types.Basic
+ // type rather than *types.Named; so we need to check.
+ if typ, _ := obj.Type().(*types.Named); typ != nil {
+ if n := typ.NumMethods(); n > 0 {
+ // Sort methods by name so that we get the
+ // same order independent of whether the
+ // methods were imported or came directly
+ // from the source.
+ // TODO(gri) This should probably be done
+ // in go/types.
+ list := make([]*types.Func, n)
+ for i := 0; i < n; i++ {
+ list[i] = typ.Method(i)
+ }
+ sort.Sort(byName(list))
+
+ buf.WriteString("\nmethods (\n")
+ for _, m := range list {
+ fmt.Fprintf(&buf, "\t%s\n", m)
+ }
+ buf.WriteString(")")
+ }
+ }
+ }
+ buf.WriteByte('\n')
+ }
+ }
+
+ return buf.String()
+}
+
+var stdLibRoot = filepath.Join(runtime.GOROOT(), "src") + string(filepath.Separator)
+
+// The following std libraries are excluded from the stdLibs list.
+var excluded = map[string]bool{
+ "builtin": true, // contains type declarations with cycles
+ "unsafe": true, // contains fake declarations
+}
+
+// stdLibs returns the list of standard library package paths.
+func stdLibs() (list []string, err error) {
+ err = filepath.Walk(stdLibRoot, func(path string, info os.FileInfo, err error) error {
+ if err == nil && info.IsDir() {
+ // testdata directories don't contain importable libraries
+ if info.Name() == "testdata" {
+ return filepath.SkipDir
+ }
+ pkgPath := path[len(stdLibRoot):] // remove stdLibRoot
+ if len(pkgPath) > 0 && !excluded[pkgPath] {
+ list = append(list, pkgPath)
+ }
+ }
+ return nil
+ })
+ return
+}
+
+type byName []*types.Func
+
+func (a byName) Len() int { return len(a) }
+func (a byName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a byName) Less(i, j int) bool { return a[i].Name() < a[j].Name() }
+
+// gcExportData returns the gc-generated export data for the given path.
+// It is based on a trimmed-down version of gcimporter.Import which does
+// not do the actual import, does not handle package unsafe, and assumes
+// that path is a correct standard library package path (no canonicalization,
+// or handling of local import paths).
+func gcExportData(path string) ([]byte, error) {
+ filename, id := gcimporter.FindPkg(path, "")
+ if filename == "" {
+ return nil, fmt.Errorf("can't find import: %s", path)
+ }
+ if id != path {
+ panic("path should be canonicalized")
+ }
+
+ f, err := os.Open(filename)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ buf := bufio.NewReader(f)
+ if err = gcimporter.FindExportData(buf); err != nil {
+ return nil, err
+ }
+
+ var data []byte
+ for {
+ line, err := buf.ReadBytes('\n')
+ if err != nil {
+ return nil, err
+ }
+ data = append(data, line...)
+ // export data ends in "$$\n"
+ if len(line) == 3 && line[0] == '$' && line[1] == '$' {
+ return data, nil
+ }
+ }
+}
+
+func gcImportData(imports map[string]*types.Package, data []byte, path string) (*types.Package, error) {
+ filename := fmt.Sprintf("<filename for %q>", path) // so we have a decent error message if necessary
+ return gcimporter.ImportData(imports, filename, path, bufio.NewReader(bytes.NewBuffer(data)))
+}
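The Sizes setting above exists because exported constants such as strconv.IntSize are evaluated at type-check time, so the checker's word size has to match the compiler that produced the gc export data. A rough standalone illustration of the same idea, written against the current standard-library go/types rather than the vendored types package used in this patch (so details may differ slightly from the code above):

package main

import (
	"fmt"
	"go/ast"
	"go/importer"
	"go/parser"
	"go/token"
	"go/types"
	"strconv"
)

func main() {
	const src = `package p
const IntSize = 32 << (^uint(0) >> 63) // 32 or 64, depending on word size
func f() { var x int; _ = x }
`
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	conf := types.Config{
		Importer:         importer.Default(),
		IgnoreFuncBodies: true, // only the exported interface matters here
		Sizes:            &types.StdSizes{WordSize: int64(strconv.IntSize / 8), MaxAlign: 8},
	}
	pkg, err := conf.Check("p", fset, []*ast.File{f}, nil)
	if err != nil {
		panic(err)
	}
	// Prints 64 on a 64-bit host, 32 on a 32-bit one.
	fmt.Println(pkg.Scope().Lookup("IntSize").(*types.Const).Val())
}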
diff --git a/llgo/third_party/go.tools/go/importer/predefined.go b/llgo/third_party/go.tools/go/importer/predefined.go
new file mode 100644
index 0000000000000000000000000000000000000000..9ce1334809134e5c7d32b9aacda7781cc1460882
--- /dev/null
+++ b/llgo/third_party/go.tools/go/importer/predefined.go
@@ -0,0 +1,83 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package importer
+
+import "llvm.org/llgo/third_party/go.tools/go/types"
+
+const (
+ magic = "\n$$ exports $$\n"
+ version = "v0"
+)
+
+// Tags. Must be < 0.
+const (
+ // Packages
+ packageTag = -(iota + 1)
+
+ // Objects
+ constTag
+ typeTag
+ varTag
+ funcTag
+
+ // Types
+ arrayTag
+ sliceTag
+ structTag
+ pointerTag
+ signatureTag
+ interfaceTag
+ mapTag
+ chanTag
+ namedTag
+
+ // Values
+ falseTag
+ trueTag
+ int64Tag
+ floatTag
+ fractionTag
+ complexTag
+ stringTag
+)
+
+var predeclared = []types.Type{
+ // basic types
+ types.Typ[types.Bool],
+ types.Typ[types.Int],
+ types.Typ[types.Int8],
+ types.Typ[types.Int16],
+ types.Typ[types.Int32],
+ types.Typ[types.Int64],
+ types.Typ[types.Uint],
+ types.Typ[types.Uint8],
+ types.Typ[types.Uint16],
+ types.Typ[types.Uint32],
+ types.Typ[types.Uint64],
+ types.Typ[types.Uintptr],
+ types.Typ[types.Float32],
+ types.Typ[types.Float64],
+ types.Typ[types.Complex64],
+ types.Typ[types.Complex128],
+ types.Typ[types.String],
+
+ // untyped types
+ types.Typ[types.UntypedBool],
+ types.Typ[types.UntypedInt],
+ types.Typ[types.UntypedRune],
+ types.Typ[types.UntypedFloat],
+ types.Typ[types.UntypedComplex],
+ types.Typ[types.UntypedString],
+ types.Typ[types.UntypedNil],
+
+ // package unsafe
+ types.Typ[types.UnsafePointer],
+
+ // aliases
+ types.UniverseByte,
+ types.UniverseRune,
+
+ types.Universe.Lookup("error").Type(),
+}
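In the export format these tags support, a negative value in the stream announces the kind of item that follows, while a non-negative value is an index into the list of types already seen, which is seeded with the predeclared list above. A toy standalone sketch of that convention (illustrative names only, not part of the patch):

package main

import "fmt"

// Negative values act as structural tags; non-negative values are indices
// into a list of previously recorded items, mirroring the -(iota + 1)
// pattern above.
const (
	packageTag = -(iota + 1) // -1
	constTag                 // -2
	typeTag                  // -3
)

func describe(i int) string {
	switch {
	case i >= 0:
		return fmt.Sprintf("index %d into the type list", i)
	case i == packageTag:
		return "packageTag"
	case i == constTag:
		return "constTag"
	case i == typeTag:
		return "typeTag"
	default:
		return "unknown tag"
	}
}

func main() {
	for _, i := range []int{0, 2, packageTag, typeTag} {
		fmt.Println(i, "=>", describe(i))
	}
}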
diff --git a/llgo/third_party/go.tools/go/loader/cgo.go b/llgo/third_party/go.tools/go/loader/cgo.go
new file mode 100644
index 0000000000000000000000000000000000000000..299e72579f27e92c3f368b304cf80c1715a8c223
--- /dev/null
+++ b/llgo/third_party/go.tools/go/loader/cgo.go
@@ -0,0 +1,199 @@
+package loader
+
+// This file handles cgo preprocessing of files containing `import "C"`.
+//
+// DESIGN
+//
+// The approach taken is to run the cgo processor on the package's
+// CgoFiles and parse the output, faking the filenames of the
+// resulting ASTs so that the synthetic file containing the C types is
+// called "C" (e.g. "~/go/src/net/C") and the preprocessed files
+// have their original names (e.g. "~/go/src/net/cgo_unix.go"),
+// not the names of the actual temporary files.
+//
+// The advantage of this approach is its fidelity to 'go build'. The
+// downside is that the token.Position.Offset for each AST node is
+// incorrect, being an offset within the temporary file. Line numbers
+// should still be correct because of the //line comments.
+//
+// The logic of this file is mostly plundered from the 'go build'
+// tool, which also invokes the cgo preprocessor.
+//
+//
+// REJECTED ALTERNATIVE
+//
+// An alternative approach that we explored is to extend go/types'
+// Importer mechanism to provide the identity of the importing package
+// so that each time `import "C"` appears it resolves to a different
+// synthetic package containing just the objects needed in that case.
+// The loader would invoke cgo but parse only the cgo_types.go file
+// defining the package-level objects, discarding the other files
+// resulting from preprocessing.
+//
+// The benefit of this approach would have been that source-level
+// syntax information would correspond exactly to the original cgo
+// file, with no preprocessing involved, making source tools like
+// godoc, oracle, and eg happy. However, the approach was rejected
+// due to the additional complexity it would impose on go/types. (It
+// made for a beautiful demo, though.)
+//
+// cgo files, despite their *.go extension, are not legal Go source
+// files per the specification since they may refer to unexported
+// members of package "C" such as C.int. Also, a function such as
+// C.getpwent has in effect two types, one matching its C type and one
+// which additionally returns (errno C.int). The cgo preprocessor
+// uses name mangling to distinguish these two functions in the
+// processed code, but go/types would need to duplicate this logic in
+// its handling of function calls, analogous to the treatment of map
+// lookups in which y=m[k] and y,ok=m[k] are both legal.
+
+import (
+ "fmt"
+ "go/ast"
+ "go/build"
+ "go/parser"
+ "go/token"
+ "io/ioutil"
+ "log"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "regexp"
+ "strings"
+)
+
+// processCgoFiles invokes the cgo preprocessor on bp.CgoFiles, parses
+// the output and returns the resulting ASTs.
+//
+func processCgoFiles(bp *build.Package, fset *token.FileSet, DisplayPath func(path string) string, mode parser.Mode) ([]*ast.File, error) {
+ tmpdir, err := ioutil.TempDir("", strings.Replace(bp.ImportPath, "/", "_", -1)+"_C")
+ if err != nil {
+ return nil, err
+ }
+ defer os.RemoveAll(tmpdir)
+
+ pkgdir := bp.Dir
+ if DisplayPath != nil {
+ pkgdir = DisplayPath(pkgdir)
+ }
+
+ cgoFiles, cgoDisplayFiles, err := runCgo(bp, pkgdir, tmpdir)
+ if err != nil {
+ return nil, err
+ }
+ var files []*ast.File
+ for i := range cgoFiles {
+ rd, err := os.Open(cgoFiles[i])
+ if err != nil {
+ return nil, err
+ }
+ defer rd.Close()
+ display := filepath.Join(bp.Dir, cgoDisplayFiles[i])
+ f, err := parser.ParseFile(fset, display, rd, mode)
+ if err != nil {
+ return nil, err
+ }
+ files = append(files, f)
+ }
+ return files, nil
+}
+
+var cgoRe = regexp.MustCompile(`[/\\:]`)
+
+// runCgo invokes the cgo preprocessor on bp.CgoFiles and returns two
+// lists of files: the resulting processed files (in temporary
+// directory tmpdir) and the corresponding names of the unprocessed files.
+//
+// runCgo is adapted from (*builder).cgo in
+// $GOROOT/src/cmd/go/build.go, but these features are unsupported:
+// pkg-config, Objective C, CGOPKGPATH, CGO_FLAGS.
+//
+func runCgo(bp *build.Package, pkgdir, tmpdir string) (files, displayFiles []string, err error) {
+ cgoCPPFLAGS, _, _, _ := cflags(bp, true)
+ _, cgoexeCFLAGS, _, _ := cflags(bp, false)
+
+ if len(bp.CgoPkgConfig) > 0 {
+ return nil, nil, fmt.Errorf("cgo pkg-config not supported")
+ }
+
+ // Allows including _cgo_export.h from .[ch] files in the package.
+ cgoCPPFLAGS = append(cgoCPPFLAGS, "-I", tmpdir)
+
+ // _cgo_gotypes.go (displayed "C") contains the type definitions.
+ files = append(files, filepath.Join(tmpdir, "_cgo_gotypes.go"))
+ displayFiles = append(displayFiles, "C")
+ for _, fn := range bp.CgoFiles {
+ // "foo.cgo1.go" (displayed "foo.go") is the processed Go source.
+ f := cgoRe.ReplaceAllString(fn[:len(fn)-len("go")], "_")
+ files = append(files, filepath.Join(tmpdir, f+"cgo1.go"))
+ displayFiles = append(displayFiles, fn)
+ }
+
+ var cgoflags []string
+ if bp.Goroot && bp.ImportPath == "runtime/cgo" {
+ cgoflags = append(cgoflags, "-import_runtime_cgo=false")
+ }
+ if bp.Goroot && bp.ImportPath == "runtime/race" || bp.ImportPath == "runtime/cgo" {
+ cgoflags = append(cgoflags, "-import_syscall=false")
+ }
+
+ args := stringList(
+ "go", "tool", "cgo", "-objdir", tmpdir, cgoflags, "--",
+ cgoCPPFLAGS, cgoexeCFLAGS, bp.CgoFiles,
+ )
+ if false {
+ log.Printf("Running cgo for package %q: %s (dir=%s)", bp.ImportPath, args, pkgdir)
+ }
+ cmd := exec.Command(args[0], args[1:]...)
+ cmd.Dir = pkgdir
+ cmd.Stdout = os.Stderr
+ cmd.Stderr = os.Stderr
+ if err := cmd.Run(); err != nil {
+ return nil, nil, fmt.Errorf("cgo failed: %s: %s", args, err)
+ }
+
+ return files, displayFiles, nil
+}
+
+// -- unmodified from 'go build' ---------------------------------------
+
+// Return the flags to use when invoking the C or C++ compilers, or cgo.
+func cflags(p *build.Package, def bool) (cppflags, cflags, cxxflags, ldflags []string) {
+ var defaults string
+ if def {
+ defaults = "-g -O2"
+ }
+
+ cppflags = stringList(envList("CGO_CPPFLAGS", ""), p.CgoCPPFLAGS)
+ cflags = stringList(envList("CGO_CFLAGS", defaults), p.CgoCFLAGS)
+ cxxflags = stringList(envList("CGO_CXXFLAGS", defaults), p.CgoCXXFLAGS)
+ ldflags = stringList(envList("CGO_LDFLAGS", defaults), p.CgoLDFLAGS)
+ return
+}
+
+// envList returns the value of the given environment variable broken
+// into fields, using the default value when the variable is empty.
+func envList(key, def string) []string {
+ v := os.Getenv(key)
+ if v == "" {
+ v = def
+ }
+ return strings.Fields(v)
+}
+
+// stringList's arguments should be a sequence of string or []string values.
+// stringList flattens them into a single []string.
+func stringList(args ...interface{}) []string {
+ var x []string
+ for _, arg := range args {
+ switch arg := arg.(type) {
+ case []string:
+ x = append(x, arg...)
+ case string:
+ x = append(x, arg)
+ default:
+ panic("stringList: invalid argument")
+ }
+ }
+ return x
+}
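The cgo command line above is assembled by splitting CGO_* environment variables into fields and flattening the mixed string/[]string pieces with stringList. The standalone sketch below re-declares those two small helpers purely for illustration, so it can be run outside this package:

package main

import (
	"fmt"
	"os"
	"strings"
)

// envList reads an environment variable and splits it into fields,
// falling back to a default when the variable is unset or empty.
func envList(key, def string) []string {
	v := os.Getenv(key)
	if v == "" {
		v = def
	}
	return strings.Fields(v)
}

// stringList flattens a mix of string and []string arguments into one
// []string, the way the cgo invocation above builds its argument vector.
func stringList(args ...interface{}) []string {
	var x []string
	for _, arg := range args {
		switch arg := arg.(type) {
		case []string:
			x = append(x, arg...)
		case string:
			x = append(x, arg)
		default:
			panic("stringList: invalid argument")
		}
	}
	return x
}

func main() {
	os.Setenv("CGO_CFLAGS", "-I/opt/include -DDEBUG")
	cflags := envList("CGO_CFLAGS", "-g -O2")
	args := stringList("go", "tool", "cgo", "--", cflags, "example.go")
	fmt.Println(args)
	// [go tool cgo -- -I/opt/include -DDEBUG example.go]
}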
diff --git a/llgo/third_party/go.tools/go/loader/loader.go b/llgo/third_party/go.tools/go/loader/loader.go
new file mode 100644
index 0000000000000000000000000000000000000000..e439f7be91a89a55a9a84cf893b64ed3f0e09224
--- /dev/null
+++ b/llgo/third_party/go.tools/go/loader/loader.go
@@ -0,0 +1,856 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package loader loads, parses and type-checks packages of Go code
+// plus their transitive closure, and retains both the ASTs and the
+// derived facts.
+//
+// THIS INTERFACE IS EXPERIMENTAL AND IS LIKELY TO CHANGE.
+//
+// The package defines two primary types: Config, which specifies a
+// set of initial packages to load and various other options; and
+// Program, which is the result of successfully loading the packages
+// specified by a configuration.
+//
+// The configuration can be set directly, but *Config provides various
+// convenience methods to simplify the common cases, each of which can
+// be called any number of times. Finally, these are followed by a
+// call to Load() to actually load and type-check the program.
+//
+// var conf loader.Config
+//
+// // Use the command-line arguments to specify
+// // a set of initial packages to load from source.
+// // See FromArgsUsage for help.
+// rest, err := conf.FromArgs(os.Args[1:], wantTests)
+//
+// // Parse the specified files and create an ad-hoc package with path "foo".
+// // All files must have the same 'package' declaration.
+// err := conf.CreateFromFilenames("foo", "foo.go", "bar.go")
+//
+// // Create an ad-hoc package with path "foo" from
+// // the specified already-parsed files.
+// // All ASTs must have the same 'package' declaration.
+// err := conf.CreateFromFiles("foo", parsedFiles)
+//
+// // Add "runtime" to the set of packages to be loaded.
+// conf.Import("runtime")
+//
+// // Adds "fmt" and "fmt_test" to the set of packages
+// // to be loaded. "fmt" will include *_test.go files.
+// err := conf.ImportWithTests("fmt")
+//
+// // Finally, load all the packages specified by the configuration.
+// prog, err := conf.Load()
+//
+//
+// CONCEPTS AND TERMINOLOGY
+//
+// An AD-HOC package is one specified as a set of source files on the
+// command line. In the simplest case, it may consist of a single file
+// such as $GOROOT/src/net/http/triv.go.
+//
+// EXTERNAL TEST packages are those comprised of a set of *_test.go
+// files all with the same 'package foo_test' declaration, all in the
+// same directory. (go/build.Package calls these files XTestFiles.)
+//
+// An IMPORTABLE package is one that can be referred to by some import
+// spec. The Path() of each importable package is unique within a
+// Program.
+//
+// Ad-hoc packages and external test packages are NON-IMPORTABLE. The
+// Path() of an ad-hoc package is inferred from the package
+// declarations of its files and is therefore not a unique package key.
+// For example, Config.CreatePkgs may specify two initial ad-hoc
+// packages both called "main".
+//
+// An AUGMENTED package is an importable package P plus all the
+// *_test.go files with same 'package foo' declaration as P.
+// (go/build.Package calls these files TestFiles.)
+//
+// The INITIAL packages are those specified in the configuration. A
+// DEPENDENCY is a package loaded to satisfy an import in an initial
+// package or another dependency.
+//
+package loader
+
+// 'go test', in-package test files, and import cycles
+// ---------------------------------------------------
+//
+// An external test package may depend upon members of the augmented
+// package that are not in the unaugmented package, such as functions
+// that expose internals. (See bufio/export_test.go for an example.)
+// So, the loader must ensure that for each external test package
+// it loads, it also augments the corresponding non-test package.
+//
+// The import graph over n unaugmented packages must be acyclic; the
+// import graph over n-1 unaugmented packages plus one augmented
+// package must also be acyclic. ('go test' relies on this.) But the
+// import graph over n augmented packages may contain cycles.
+//
+// First, all the (unaugmented) non-test packages and their
+// dependencies are imported in the usual way; the loader reports an
+// error if it detects an import cycle.
+//
+// Then, each package P for which testing is desired is augmented by
+// the list P' of its in-package test files, by calling
+// (*types.Checker).Files. This arrangement ensures that P' may
+// reference definitions within P, but P may not reference definitions
+// within P'. Furthermore, P' may import any other package, including
+// ones that depend upon P, without an import cycle error.
+//
+// Consider two packages A and B, both of which have lists of
+// in-package test files we'll call A' and B', and which have the
+// following import graph edges:
+// B imports A
+// B' imports A
+// A' imports B
+// This last edge would be expected to create an error were it not
+// for the special type-checking discipline above.
+// Cycles of size greater than two are possible. For example:
+// compress/bzip2/bzip2_test.go (package bzip2) imports "io/ioutil"
+// io/ioutil/tempfile_test.go (package ioutil) imports "regexp"
+// regexp/exec_test.go (package regexp) imports "compress/bzip2"
+
+// TODO(adonovan):
+// - (*Config).ParseFile is very handy, but feels like feature creep.
+// (*Config).CreateFromFiles has a nasty precondition.
+// - s/path/importPath/g to avoid ambiguity with other meanings of
+// "path": a file name, a colon-separated directory list.
+// - cache the calls to build.Import so we don't do it three times per
+// test package.
+// - Thorough overhaul of package documentation.
+// - Certain errors (e.g. parse error in x_test.go files, or failure to
+// import an initial package) still cause Load() to fail hard.
+// Fix that. (It's tricky because of the way x_test files are parsed
+// eagerly.)
+
+import (
+ "errors"
+ "fmt"
+ "go/ast"
+ "go/build"
+ "go/parser"
+ "go/token"
+ "os"
+ "strings"
+
+ "llvm.org/llgo/third_party/go.tools/astutil"
+ "llvm.org/llgo/third_party/go.tools/go/gcimporter"
+ "llvm.org/llgo/third_party/go.tools/go/types"
+)
+
+// Config specifies the configuration for a program to load.
+// The zero value for Config is a ready-to-use default configuration.
+type Config struct {
+ // Fset is the file set for the parser to use when loading the
+ // program. If nil, it will be lazily initialized by any
+ // method of Config.
+ Fset *token.FileSet
+
+ // ParserMode specifies the mode to be used by the parser when
+ // loading source packages.
+ ParserMode parser.Mode
+
+ // TypeChecker contains options relating to the type checker.
+ //
+ // The supplied IgnoreFuncBodies is not used; the effective
+ // value comes from the TypeCheckFuncBodies func below.
+ //
+ // TypeChecker.Packages is lazily initialized during Load.
+ TypeChecker types.Config
+
+ // TypeCheckFuncBodies is a predicate over package import
+ // paths. A package for which the predicate is false will
+ // have its package-level declarations type checked, but not
+ // its function bodies; this can be used to quickly load
+ // dependencies from source. If nil, all func bodies are type
+ // checked.
+ TypeCheckFuncBodies func(string) bool
+
+ // SourceImports determines whether to satisfy dependencies by
+ // loading Go source code.
+ //
+ // If true, the entire program---the initial packages and
+ // their transitive closure of dependencies---will be loaded,
+ // parsed and type-checked. This is required for
+ // whole-program analyses such as pointer analysis.
+ //
+ // If false, the TypeChecker.Import mechanism will be used
+ // instead. Since that typically supplies only the types of
+ // package-level declarations and values of constants, but no
+ // code, it will not yield a whole program. It is intended
+ // for analyses that perform modular analysis of a
+ // single package, e.g. traditional compilation.
+ //
+ // The initial packages (CreatePkgs and ImportPkgs) are always
+ // loaded from Go source, regardless of this flag's setting.
+ SourceImports bool
+
+ // If Build is non-nil, it is used to locate source packages.
+ // Otherwise &build.Default is used.
+ //
+ // By default, cgo is invoked to preprocess Go files that
+ // import the fake package "C". This behaviour can be
+ // disabled by setting CGO_ENABLED=0 in the environment prior
+ // to startup, or by setting Build.CgoEnabled=false.
+ Build *build.Context
+
+ // If DisplayPath is non-nil, it is used to transform each
+ // file name obtained from Build.Import(). This can be used
+ // to prevent a virtualized build.Config's file names from
+ // leaking into the user interface.
+ DisplayPath func(path string) string
+
+ // If AllowErrors is true, Load will return a Program even
+ // if some of its packages contained I/O, parser or type
+ // errors; such errors are accessible via PackageInfo.Errors. If
+ // false, Load will fail if any package had an error.
+ AllowErrors bool
+
+ // CreatePkgs specifies a list of non-importable initial
+ // packages to create. Each element specifies a list of
+ // parsed files to be type-checked into a new package, and a
+ // path for that package. If the path is "", the package's
+ // name will be used instead. The path needn't be globally
+ // unique.
+ //
+ // The resulting packages will appear in the corresponding
+ // elements of the Program.Created slice.
+ CreatePkgs []CreatePkg
+
+ // ImportPkgs specifies a set of initial packages to load from
+ // source. The map keys are package import paths, used to
+ // locate the package relative to $GOROOT. The corresponding
+ // values indicate whether to augment the package by *_test.go
+ // files in a second pass.
+ ImportPkgs map[string]bool
+}
+
+type CreatePkg struct {
+ Path string
+ Files []*ast.File
+}
+
+// A Program is a Go program loaded from source or binary
+// as specified by a Config.
+type Program struct {
+ Fset *token.FileSet // the file set for this program
+
+ // Created[i] contains the initial package whose ASTs were
+ // supplied by Config.CreatePkgs[i].
+ Created []*PackageInfo
+
+ // Imported contains the initially imported packages,
+ // as specified by Config.ImportPkgs.
+ Imported map[string]*PackageInfo
+
+ // ImportMap is the canonical mapping of import paths to
+ // packages used by the type-checker (Config.TypeChecker.Packages).
+ // It contains all Imported initial packages, but not Created
+ // ones, and all imported dependencies.
+ ImportMap map[string]*types.Package
+
+ // AllPackages contains the PackageInfo of every package
+ // encountered by Load: all initial packages and all
+ // dependencies, including incomplete ones.
+ AllPackages map[*types.Package]*PackageInfo
+}
+
+// PackageInfo holds the ASTs and facts derived by the type-checker
+// for a single package.
+//
+// Not mutated once exposed via the API.
+//
+type PackageInfo struct {
+ Pkg *types.Package
+ Importable bool // true if 'import "Pkg.Path()"' would resolve to this
+ TransitivelyErrorFree bool // true if Pkg and all its dependencies are free of errors
+ Files []*ast.File // syntax trees for the package's files
+ Errors []error // non-nil if the package had errors
+ types.Info // type-checker deductions.
+
+ checker *types.Checker // transient type-checker state
+ errorFunc func(error)
+}
+
+func (info *PackageInfo) String() string { return info.Pkg.Path() }
+
+func (info *PackageInfo) appendError(err error) {
+ if info.errorFunc != nil {
+ info.errorFunc(err)
+ } else {
+ fmt.Fprintln(os.Stderr, err)
+ }
+ info.Errors = append(info.Errors, err)
+}
+
+func (conf *Config) fset() *token.FileSet {
+ if conf.Fset == nil {
+ conf.Fset = token.NewFileSet()
+ }
+ return conf.Fset
+}
+
+// ParseFile is a convenience function that invokes the parser using
+// the Config's FileSet, which is initialized if nil.
+//
+func (conf *Config) ParseFile(filename string, src interface{}) (*ast.File, error) {
+ // TODO(adonovan): use conf.build() etc like parseFiles does.
+ return parser.ParseFile(conf.fset(), filename, src, conf.ParserMode)
+}
+
+// FromArgsUsage is a partial usage message that applications calling
+// FromArgs may wish to include in their -help output.
+const FromArgsUsage = `
+<args> is a list of arguments denoting a set of initial packages.
+It may take one of two forms:
+
+1. A list of *.go source files.
+
+ All of the specified files are loaded, parsed and type-checked
+ as a single package. All the files must belong to the same directory.
+
+2. A list of import paths, each denoting a package.
+
+ The package's directory is found relative to the $GOROOT and
+ $GOPATH using similar logic to 'go build', and the *.go files in
+ that directory are loaded, parsed and type-checked as a single
+ package.
+
+ In addition, all *_test.go files in the directory are then loaded
+ and parsed. Those files whose package declaration equals that of
+ the non-*_test.go files are included in the primary package. Test
+ files whose package declaration ends with "_test" are type-checked
+ as another package, the 'external' test package, so that a single
+ import path may denote two packages. (Whether this behaviour is
+ enabled is tool-specific, and may depend on additional flags.)
+
+ Due to current limitations in the type-checker, only the first
+ import path of the command line will contribute any tests.
+
+A '--' argument terminates the list of packages.
+`
+
+// FromArgs interprets args as a set of initial packages to load from
+// source and updates the configuration. It returns the list of
+// unconsumed arguments.
+//
+// It is intended for use in command-line interfaces that require a
+// set of initial packages to be specified; see FromArgsUsage message
+// for details.
+//
+func (conf *Config) FromArgs(args []string, xtest bool) (rest []string, err error) {
+ for i, arg := range args {
+ if arg == "--" {
+ rest = args[i+1:]
+ args = args[:i]
+ break // consume "--" and return the remaining args
+ }
+ }
+
+ if len(args) > 0 && strings.HasSuffix(args[0], ".go") {
+ // Assume args is a list of *.go files
+ // denoting a single ad-hoc package.
+ for _, arg := range args {
+ if !strings.HasSuffix(arg, ".go") {
+ return nil, fmt.Errorf("named files must be .go files: %s", arg)
+ }
+ }
+ err = conf.CreateFromFilenames("", args...)
+ } else {
+ // Assume args are directories each denoting a
+ // package and (perhaps) an external test, iff xtest.
+ for _, arg := range args {
+ if xtest {
+ err = conf.ImportWithTests(arg)
+ if err != nil {
+ break
+ }
+ } else {
+ conf.Import(arg)
+ }
+ }
+ }
+
+ return
+}
+
+// CreateFromFilenames is a convenience function that parses the
+// specified *.go files and adds a package entry for them to
+// conf.CreatePkgs.
+//
+// It fails if any file could not be loaded or parsed.
+//
+func (conf *Config) CreateFromFilenames(path string, filenames ...string) error {
+ files, errs := parseFiles(conf.fset(), conf.build(), nil, ".", filenames, conf.ParserMode)
+ if len(errs) > 0 {
+ return errs[0]
+ }
+ conf.CreateFromFiles(path, files...)
+ return nil
+}
+
+// CreateFromFiles is a convenience function that adds a CreatePkgs
+ // entry to create a package of the specified path and parsed files.
+//
+// Precondition: conf.Fset is non-nil and was the fileset used to parse
+// the files. (e.g. the files came from conf.ParseFile().)
+//
+func (conf *Config) CreateFromFiles(path string, files ...*ast.File) {
+ if conf.Fset == nil {
+ panic("nil Fset")
+ }
+ conf.CreatePkgs = append(conf.CreatePkgs, CreatePkg{path, files})
+}
+
+// ImportWithTests is a convenience function that adds path to
+// ImportPkgs, the set of initial source packages located relative to
+// $GOPATH. The package will be augmented by any *_test.go files in
+// its directory that contain a "package x" (not "package x_test")
+// declaration.
+//
+// In addition, if any *_test.go files contain a "package x_test"
+// declaration, an additional package comprising just those files will
+// be added to CreatePkgs.
+//
+func (conf *Config) ImportWithTests(path string) error {
+ if path == "unsafe" {
+ return nil // ignore; not a real package
+ }
+ conf.Import(path)
+
+ // Load the external test package.
+ bp, err := conf.findSourcePackage(path)
+ if err != nil {
+ return err // package not found
+ }
+ xtestFiles, errs := conf.parsePackageFiles(bp, 'x')
+ if len(errs) > 0 {
+ // TODO(adonovan): fix: parse errors in x_test.go files
+ // cause FromArgs() to fail completely.
+ return errs[0] // I/O or parse error
+ }
+ if len(xtestFiles) > 0 {
+ conf.CreateFromFiles(path+"_test", xtestFiles...)
+ }
+
+ // Mark the non-xtest package for augmentation with
+ // in-package *_test.go files when we import it below.
+ conf.ImportPkgs[path] = true
+ return nil
+}
+
+// Import is a convenience function that adds path to ImportPkgs, the
+// set of initial packages that will be imported from source.
+//
+func (conf *Config) Import(path string) {
+ if path == "unsafe" {
+ return // ignore; not a real package
+ }
+ if conf.ImportPkgs == nil {
+ conf.ImportPkgs = make(map[string]bool)
+ }
+ // Subtle: adds value 'false' unless value is already true.
+ conf.ImportPkgs[path] = conf.ImportPkgs[path] // unaugmented source package
+}
+
+// PathEnclosingInterval returns the PackageInfo and ast.Node that
+// contain source interval [start, end), and all the node's ancestors
+// up to the AST root. It searches all ast.Files of all packages in prog.
+// exact is defined as for astutil.PathEnclosingInterval.
+//
+// The zero value is returned if not found.
+//
+func (prog *Program) PathEnclosingInterval(start, end token.Pos) (pkg *PackageInfo, path []ast.Node, exact bool) {
+ for _, info := range prog.AllPackages {
+ for _, f := range info.Files {
+ if !tokenFileContainsPos(prog.Fset.File(f.Pos()), start) {
+ continue
+ }
+ if path, exact := astutil.PathEnclosingInterval(f, start, end); path != nil {
+ return info, path, exact
+ }
+ }
+ }
+ return nil, nil, false
+}
+
+// InitialPackages returns a new slice containing the set of initial
+// packages (Created + Imported) in unspecified order.
+//
+func (prog *Program) InitialPackages() []*PackageInfo {
+ infos := make([]*PackageInfo, 0, len(prog.Created)+len(prog.Imported))
+ infos = append(infos, prog.Created...)
+ for _, info := range prog.Imported {
+ infos = append(infos, info)
+ }
+ return infos
+}
+
+// ---------- Implementation ----------
+
+// importer holds the working state of the algorithm.
+type importer struct {
+ conf *Config // the client configuration
+ prog *Program // resulting program
+ imported map[string]*importInfo // all imported packages (incl. failures) by import path
+}
+
+// importInfo tracks the success or failure of a single import.
+type importInfo struct {
+ info *PackageInfo // results of typechecking (including errors)
+ err error // reason for failure to make a package
+}
+
+// Load creates the initial packages specified by conf.{Create,Import}Pkgs,
+ // loading their dependencies as needed.
+//
+// On success, Load returns a Program containing a PackageInfo for
+// each package. On failure, it returns an error.
+//
+// If AllowErrors is true, Load will return a Program even if some
+// packages contained I/O, parser or type errors, or if dependencies
+ // were missing. (Such errors are accessible via PackageInfo.Errors.)
+ // If false, Load will fail if any package had an error.
+//
+// It is an error if no packages were loaded.
+//
+func (conf *Config) Load() (*Program, error) {
+ // Initialize by setting the conf's copy, so all copies of
+ // TypeChecker agree on the identity of the map.
+ if conf.TypeChecker.Packages == nil {
+ conf.TypeChecker.Packages = make(map[string]*types.Package)
+ }
+
+ // Create a simple default error handler for parse/type errors.
+ if conf.TypeChecker.Error == nil {
+ conf.TypeChecker.Error = func(e error) { fmt.Fprintln(os.Stderr, e) }
+ }
+
+ prog := &Program{
+ Fset: conf.fset(),
+ Imported: make(map[string]*PackageInfo),
+ ImportMap: conf.TypeChecker.Packages,
+ AllPackages: make(map[*types.Package]*PackageInfo),
+ }
+
+ imp := importer{
+ conf: conf,
+ prog: prog,
+ imported: make(map[string]*importInfo),
+ }
+
+ for path := range conf.ImportPkgs {
+ info, err := imp.importPackage(path)
+ if err != nil {
+ return nil, err // failed to create package
+ }
+ prog.Imported[path] = info
+ }
+
+ // Now augment those packages that need it.
+ for path, augment := range conf.ImportPkgs {
+ if augment {
+ // Find and create the actual package.
+ bp, err := conf.findSourcePackage(path)
+ if err != nil {
+ // "Can't happen" because of previous loop.
+ return nil, err // package not found
+ }
+
+ info := imp.imported[path].info // must be non-nil, see above
+ files, errs := imp.conf.parsePackageFiles(bp, 't')
+ for _, err := range errs {
+ info.appendError(err)
+ }
+ typeCheckFiles(info, files...)
+ }
+ }
+
+ for _, create := range conf.CreatePkgs {
+ path := create.Path
+ if create.Path == "" && len(create.Files) > 0 {
+ path = create.Files[0].Name.Name
+ }
+ info := imp.newPackageInfo(path)
+ typeCheckFiles(info, create.Files...)
+ prog.Created = append(prog.Created, info)
+ }
+
+ if len(prog.Imported)+len(prog.Created) == 0 {
+ return nil, errors.New("no initial packages were specified")
+ }
+
+ // Create infos for indirectly imported packages.
+ // e.g. incomplete packages without syntax, loaded from export data.
+ for _, obj := range prog.ImportMap {
+ info := prog.AllPackages[obj]
+ if info == nil {
+ prog.AllPackages[obj] = &PackageInfo{Pkg: obj, Importable: true}
+ } else {
+ // finished
+ info.checker = nil
+ info.errorFunc = nil
+ }
+ }
+
+ if !conf.AllowErrors {
+ // Report errors in indirectly imported packages.
+ var errpkgs []string
+ for _, info := range prog.AllPackages {
+ if len(info.Errors) > 0 {
+ errpkgs = append(errpkgs, info.Pkg.Path())
+ }
+ }
+ if errpkgs != nil {
+ var more string
+ if len(errpkgs) > 3 {
+ more = fmt.Sprintf(" and %d more", len(errpkgs)-3)
+ errpkgs = errpkgs[:3]
+ }
+ return nil, fmt.Errorf("couldn't load packages due to errors: %s%s",
+ strings.Join(errpkgs, ", "), more)
+ }
+ }
+
+ markErrorFreePackages(prog.AllPackages)
+
+ return prog, nil
+}
+
+// markErrorFreePackages sets the TransitivelyErrorFree flag on all
+// applicable packages.
+func markErrorFreePackages(allPackages map[*types.Package]*PackageInfo) {
+ // Build the transpose of the import graph.
+ importedBy := make(map[*types.Package]map[*types.Package]bool)
+ for P := range allPackages {
+ for _, Q := range P.Imports() {
+ clients, ok := importedBy[Q]
+ if !ok {
+ clients = make(map[*types.Package]bool)
+ importedBy[Q] = clients
+ }
+ clients[P] = true
+ }
+ }
+
+ // Find all packages reachable from some error package.
+ reachable := make(map[*types.Package]bool)
+ var visit func(*types.Package)
+ visit = func(p *types.Package) {
+ if !reachable[p] {
+ reachable[p] = true
+ for q := range importedBy[p] {
+ visit(q)
+ }
+ }
+ }
+ for _, info := range allPackages {
+ if len(info.Errors) > 0 {
+ visit(info.Pkg)
+ }
+ }
+
+ // Mark the others as "transitively error-free".
+ for _, info := range allPackages {
+ if !reachable[info.Pkg] {
+ info.TransitivelyErrorFree = true
+ }
+ }
+}
+
+// build returns the effective build context.
+func (conf *Config) build() *build.Context {
+ if conf.Build != nil {
+ return conf.Build
+ }
+ return &build.Default
+}
+
+// findSourcePackage locates the specified (possibly empty) package
+// using go/build logic. It returns an error if not found.
+//
+func (conf *Config) findSourcePackage(path string) (*build.Package, error) {
+ // Import(srcDir="") disables local imports, e.g. import "./foo".
+ bp, err := conf.build().Import(path, "", 0)
+ if _, ok := err.(*build.NoGoError); ok {
+ return bp, nil // empty directory is not an error
+ }
+ return bp, err
+}
+
+// parsePackageFiles enumerates the files belonging to package path,
+// then loads, parses and returns them, plus a list of I/O or parse
+// errors that were encountered.
+//
+// 'which' indicates which files to include:
+// 'g': include non-test *.go source files (GoFiles + processed CgoFiles)
+// 't': include in-package *_test.go source files (TestGoFiles)
+// 'x': include external *_test.go source files. (XTestGoFiles)
+//
+func (conf *Config) parsePackageFiles(bp *build.Package, which rune) ([]*ast.File, []error) {
+ var filenames []string
+ switch which {
+ case 'g':
+ filenames = bp.GoFiles
+ case 't':
+ filenames = bp.TestGoFiles
+ case 'x':
+ filenames = bp.XTestGoFiles
+ default:
+ panic(which)
+ }
+
+ files, errs := parseFiles(conf.fset(), conf.build(), conf.DisplayPath, bp.Dir, filenames, conf.ParserMode)
+
+ // Preprocess CgoFiles and parse the outputs (sequentially).
+ if which == 'g' && bp.CgoFiles != nil {
+ cgofiles, err := processCgoFiles(bp, conf.fset(), conf.DisplayPath, conf.ParserMode)
+ if err != nil {
+ errs = append(errs, err)
+ } else {
+ files = append(files, cgofiles...)
+ }
+ }
+
+ return files, errs
+}
+
+// doImport imports the package denoted by path.
+// It implements the types.Importer signature.
+//
+// imports is the type-checker's package canonicalization map.
+//
+// It returns an error if a package could not be created
+// (e.g. go/build or parse error), but type errors are reported via
+// the types.Config.Error callback (the first of which is also saved
+// in the package's PackageInfo).
+//
+// Idempotent.
+//
+func (imp *importer) doImport(imports map[string]*types.Package, path string) (*types.Package, error) {
+ // Package unsafe is handled specially, and has no PackageInfo.
+ if path == "unsafe" {
+ return types.Unsafe, nil
+ }
+
+ info, err := imp.importPackage(path)
+ if err != nil {
+ return nil, err
+ }
+
+ // Update the type checker's package map on success.
+ imports[path] = info.Pkg
+
+ return info.Pkg, nil
+}
+
+// importPackage imports the package with the given import path, plus
+// its dependencies.
+//
+// On success, it returns a PackageInfo, possibly containing errors.
+// importPackage returns an error if it couldn't even create the package.
+//
+// Precondition: path != "unsafe".
+//
+func (imp *importer) importPackage(path string) (*PackageInfo, error) {
+ ii, ok := imp.imported[path]
+ if !ok {
+ // In preorder, initialize the map entry to a cycle
+ // error in case importPackage(path) is called again
+ // before the import is completed.
+ ii = &importInfo{err: fmt.Errorf("import cycle in package %s", path)}
+ imp.imported[path] = ii
+
+ // Find and create the actual package.
+ if _, ok := imp.conf.ImportPkgs[path]; ok || imp.conf.SourceImports {
+ ii.info, ii.err = imp.importFromSource(path)
+ } else {
+ ii.info, ii.err = imp.importFromBinary(path)
+ }
+ if ii.info != nil {
+ ii.info.Importable = true
+ }
+ }
+
+ return ii.info, ii.err
+}
+
+// importFromBinary implements package loading from the client-supplied
+// external source, e.g. object files from the gc compiler.
+//
+func (imp *importer) importFromBinary(path string) (*PackageInfo, error) {
+ // Determine the caller's effective Import function.
+ importfn := imp.conf.TypeChecker.Import
+ if importfn == nil {
+ importfn = gcimporter.Import
+ }
+ pkg, err := importfn(imp.conf.TypeChecker.Packages, path)
+ if err != nil {
+ return nil, err
+ }
+ info := &PackageInfo{Pkg: pkg}
+ imp.prog.AllPackages[pkg] = info
+ return info, nil
+}
+
+// importFromSource implements package loading by parsing Go source files
+// located by go/build.
+//
+func (imp *importer) importFromSource(path string) (*PackageInfo, error) {
+ bp, err := imp.conf.findSourcePackage(path)
+ if err != nil {
+ return nil, err // package not found
+ }
+ // Type-check the package.
+ info := imp.newPackageInfo(path)
+ files, errs := imp.conf.parsePackageFiles(bp, 'g')
+ for _, err := range errs {
+ info.appendError(err)
+ }
+ typeCheckFiles(info, files...)
+ return info, nil
+}
+
+// typeCheckFiles adds the specified files to info and type-checks them.
+// The order of files determines the package initialization order.
+// It may be called multiple times.
+//
+// Errors are stored in the info.Errors field.
+func typeCheckFiles(info *PackageInfo, files ...*ast.File) {
+ info.Files = append(info.Files, files...)
+
+ // Ignore the returned (first) error since we already collect them all.
+ _ = info.checker.Files(files)
+}
+
+func (imp *importer) newPackageInfo(path string) *PackageInfo {
+ pkg := types.NewPackage(path, "")
+ info := &PackageInfo{
+ Pkg: pkg,
+ Info: types.Info{
+ Types: make(map[ast.Expr]types.TypeAndValue),
+ Defs: make(map[*ast.Ident]types.Object),
+ Uses: make(map[*ast.Ident]types.Object),
+ Implicits: make(map[ast.Node]types.Object),
+ Scopes: make(map[ast.Node]*types.Scope),
+ Selections: make(map[*ast.SelectorExpr]*types.Selection),
+ },
+ errorFunc: imp.conf.TypeChecker.Error,
+ }
+
+ // Copy the types.Config so we can vary it across PackageInfos.
+ tc := imp.conf.TypeChecker
+ tc.IgnoreFuncBodies = false
+ if f := imp.conf.TypeCheckFuncBodies; f != nil {
+ tc.IgnoreFuncBodies = !f(path)
+ }
+ tc.Import = imp.doImport // doImport wraps the user's importfn, effectively
+ tc.Error = info.appendError // appendError wraps the user's Error function
+
+ info.checker = types.NewChecker(&tc, imp.conf.fset(), pkg, &info.Info)
+ imp.prog.AllPackages[pkg] = info
+ return info
+}
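markErrorFreePackages above computes the transpose of the import graph and taints every package that transitively depends on a package with errors; whatever remains untainted gets TransitivelyErrorFree. A minimal standalone sketch of that reachability pass on plain strings, using the same a/b/c/d/e package shape as the loader tests that follow:

package main

import "fmt"

func main() {
	// imports maps a package to the packages it imports,
	// mirroring the a -> b -> c! / a -> e -> d shape in the tests.
	imports := map[string][]string{
		"a": {"b", "e"},
		"b": {"c"},
		"c": {},
		"d": {},
		"e": {"d"},
	}
	hasErrors := map[string]bool{"c": true}

	// Transpose the graph: importedBy[q] lists the packages importing q.
	importedBy := make(map[string][]string)
	for p, qs := range imports {
		for _, q := range qs {
			importedBy[q] = append(importedBy[q], p)
		}
	}

	// Taint every package reachable from an error package via importedBy,
	// i.e. every package that transitively depends on an error.
	tainted := make(map[string]bool)
	var visit func(string)
	visit = func(p string) {
		if !tainted[p] {
			tainted[p] = true
			for _, q := range importedBy[p] {
				visit(q)
			}
		}
	}
	for p := range hasErrors {
		visit(p)
	}

	for _, p := range []string{"a", "b", "c", "d", "e"} {
		fmt.Printf("%s transitively error-free: %v\n", p, !tainted[p])
	}
	// a, b, c come out false; d and e come out true.
}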
diff --git a/llgo/third_party/go.tools/go/loader/loader_test.go b/llgo/third_party/go.tools/go/loader/loader_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..d510abac8278ca48a8dfa6a98b199149de4b17b4
--- /dev/null
+++ b/llgo/third_party/go.tools/go/loader/loader_test.go
@@ -0,0 +1,259 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package loader_test
+
+import (
+ "bytes"
+ "fmt"
+ "go/build"
+ "io"
+ "io/ioutil"
+ "os"
+ "sort"
+ "strings"
+ "testing"
+ "time"
+
+ "llvm.org/llgo/third_party/go.tools/go/loader"
+)
+
+func loadFromArgs(args []string) (prog *loader.Program, rest []string, err error) {
+ conf := &loader.Config{}
+ rest, err = conf.FromArgs(args, true)
+ if err == nil {
+ prog, err = conf.Load()
+ }
+ return
+}
+
+func TestLoadFromArgs(t *testing.T) {
+ // Failed load: bad first import path causes parsePackageFiles to fail.
+ args := []string{"nosuchpkg", "errors"}
+ if _, _, err := loadFromArgs(args); err == nil {
+ t.Errorf("loadFromArgs(%q) succeeded, want failure", args)
+ } else {
+ // cannot find package: ok.
+ }
+
+ // Failed load: bad second import path proceeds to doImport0, which fails.
+ args = []string{"errors", "nosuchpkg"}
+ if _, _, err := loadFromArgs(args); err == nil {
+ t.Errorf("loadFromArgs(%q) succeeded, want failure", args)
+ } else {
+ // cannot find package: ok
+ }
+
+ // Successful load.
+ args = []string{"fmt", "errors", "--", "surplus"}
+ prog, rest, err := loadFromArgs(args)
+ if err != nil {
+ t.Fatalf("loadFromArgs(%q) failed: %s", args, err)
+ }
+ if got, want := fmt.Sprint(rest), "[surplus]"; got != want {
+ t.Errorf("loadFromArgs(%q) rest: got %s, want %s", args, got, want)
+ }
+ // Check list of Created packages.
+ var pkgnames []string
+ for _, info := range prog.Created {
+ pkgnames = append(pkgnames, info.Pkg.Path())
+ }
+ // All import paths may contribute tests.
+ if got, want := fmt.Sprint(pkgnames), "[fmt_test errors_test]"; got != want {
+ t.Errorf("Created: got %s, want %s", got, want)
+ }
+
+ // Check set of Imported packages.
+ pkgnames = nil
+ for path := range prog.Imported {
+ pkgnames = append(pkgnames, path)
+ }
+ sort.Strings(pkgnames)
+ // All import paths may contribute tests.
+ if got, want := fmt.Sprint(pkgnames), "[errors fmt]"; got != want {
+ t.Errorf("Loaded: got %s, want %s", got, want)
+ }
+
+ // Check set of transitive packages.
+ // There are >30 and the set may grow over time, so only check a few.
+ all := map[string]struct{}{}
+ for _, info := range prog.AllPackages {
+ all[info.Pkg.Path()] = struct{}{}
+ }
+ want := []string{"strings", "time", "runtime", "testing", "unicode"}
+ for _, w := range want {
+ if _, ok := all[w]; !ok {
+ t.Errorf("AllPackages: want element %s, got set %v", w, all)
+ }
+ }
+}
+
+func TestLoadFromArgsSource(t *testing.T) {
+ // mixture of *.go/non-go.
+ args := []string{"testdata/a.go", "fmt"}
+ prog, _, err := loadFromArgs(args)
+ if err == nil {
+ t.Errorf("loadFromArgs(%q) succeeded, want failure", args)
+ } else {
+ // "named files must be .go files: fmt": ok
+ }
+
+ // successful load
+ args = []string{"testdata/a.go", "testdata/b.go"}
+ prog, _, err = loadFromArgs(args)
+ if err != nil {
+ t.Fatalf("loadFromArgs(%q) failed: %s", args, err)
+ }
+ if len(prog.Created) != 1 {
+ t.Errorf("loadFromArgs(%q): got %d items, want 1", len(prog.Created))
+ }
+ if len(prog.Created) > 0 {
+ path := prog.Created[0].Pkg.Path()
+ if path != "P" {
+ t.Errorf("loadFromArgs(%q): got %v, want [P]", prog.Created, path)
+ }
+ }
+}
+
+type fakeFileInfo struct{}
+
+func (fakeFileInfo) Name() string { return "x.go" }
+func (fakeFileInfo) Sys() interface{} { return nil }
+func (fakeFileInfo) ModTime() time.Time { return time.Time{} }
+func (fakeFileInfo) IsDir() bool { return false }
+func (fakeFileInfo) Size() int64 { return 0 }
+func (fakeFileInfo) Mode() os.FileMode { return 0644 }
+
+var justXgo = [1]os.FileInfo{fakeFileInfo{}} // ["x.go"]
+
+func fakeContext(pkgs map[string]string) *build.Context {
+ ctxt := build.Default // copy
+ ctxt.GOROOT = "/go"
+ ctxt.GOPATH = ""
+ ctxt.IsDir = func(path string) bool { return true }
+ ctxt.ReadDir = func(dir string) ([]os.FileInfo, error) { return justXgo[:], nil }
+ ctxt.OpenFile = func(path string) (io.ReadCloser, error) {
+ path = path[len("/go/src/"):]
+ return ioutil.NopCloser(bytes.NewBufferString(pkgs[path[0:1]])), nil
+ }
+ return &ctxt
+}
+
+func TestTransitivelyErrorFreeFlag(t *testing.T) {
+ // Create a minimal custom build.Context
+ // that fakes the following packages:
+ //
+ // a --> b --> c! c has an error
+ // \ d and e are transitively error-free.
+ // e --> d
+ //
+ // Each package [a-e] consists of one file, x.go.
+ pkgs := map[string]string{
+ "a": `package a; import (_ "b"; _ "e")`,
+ "b": `package b; import _ "c"`,
+ "c": `package c; func f() { _ = int(false) }`, // type error within function body
+ "d": `package d;`,
+ "e": `package e; import _ "d"`,
+ }
+ conf := loader.Config{
+ AllowErrors: true,
+ SourceImports: true,
+ Build: fakeContext(pkgs),
+ }
+ conf.Import("a")
+
+ prog, err := conf.Load()
+ if err != nil {
+ t.Errorf("Load failed: %s", err)
+ }
+ if prog == nil {
+ t.Fatalf("Load returned nil *Program")
+ }
+
+ for pkg, info := range prog.AllPackages {
+ var wantErr, wantTEF bool
+ switch pkg.Path() {
+ case "a", "b":
+ case "c":
+ wantErr = true
+ case "d", "e":
+ wantTEF = true
+ default:
+ t.Errorf("unexpected package: %q", pkg.Path())
+ continue
+ }
+
+ if (info.Errors != nil) != wantErr {
+ if wantErr {
+ t.Errorf("Package %q.Error = nil, want error", pkg.Path())
+ } else {
+ t.Errorf("Package %q has unexpected Errors: %v",
+ pkg.Path(), info.Errors)
+ }
+ }
+
+ if info.TransitivelyErrorFree != wantTEF {
+ t.Errorf("Package %q.TransitivelyErrorFree=%t, want %t",
+ pkg.Path(), info.TransitivelyErrorFree, wantTEF)
+ }
+ }
+}
+
+// Test that both syntax (scan/parse) and type errors are both recorded
+// (in PackageInfo.Errors) and reported (via Config.TypeChecker.Error).
+func TestErrorReporting(t *testing.T) {
+ pkgs := map[string]string{
+ "a": `package a; import _ "b"; var x int = false`,
+ "b": `package b; 'syntax error!`,
+ }
+ conf := loader.Config{
+ AllowErrors: true,
+ SourceImports: true,
+ Build: fakeContext(pkgs),
+ }
+ var allErrors []error
+ conf.TypeChecker.Error = func(err error) {
+ allErrors = append(allErrors, err)
+ }
+ conf.Import("a")
+
+ prog, err := conf.Load()
+ if err != nil {
+ t.Errorf("Load failed: %s", err)
+ }
+ if prog == nil {
+ t.Fatalf("Load returned nil *Program")
+ }
+
+ hasError := func(errors []error, substr string) bool {
+ for _, err := range errors {
+ if strings.Contains(err.Error(), substr) {
+ return true
+ }
+ }
+ return false
+ }
+
+ // TODO(adonovan): test keys of ImportMap.
+
+ // Check errors recorded in each PackageInfo.
+ for pkg, info := range prog.AllPackages {
+ switch pkg.Path() {
+ case "a":
+ if !hasError(info.Errors, "cannot convert false") {
+ t.Errorf("a.Errors = %v, want bool conversion (type) error", info.Errors)
+ }
+ case "b":
+ if !hasError(info.Errors, "rune literal not terminated") {
+ t.Errorf("b.Errors = %v, want unterminated literal (syntax) error", info.Errors)
+ }
+ }
+ }
+
+ // Check errors reported via error handler.
+ if !hasError(allErrors, "cannot convert false") ||
+ !hasError(allErrors, "rune literal not terminated") {
+ t.Errorf("allErrors = %v, want both syntax and type errors", allErrors)
+ }
+}
diff --git a/llgo/third_party/go.tools/go/loader/source_test.go b/llgo/third_party/go.tools/go/loader/source_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..d7ebd06fcd510290f72ae0d08bb97ea72d553a3d
--- /dev/null
+++ b/llgo/third_party/go.tools/go/loader/source_test.go
@@ -0,0 +1,126 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package loader_test
+
+// This file defines tests of source utilities.
+
+import (
+ "go/ast"
+ "go/parser"
+ "go/token"
+ "strings"
+ "testing"
+
+ "llvm.org/llgo/third_party/go.tools/astutil"
+ "llvm.org/llgo/third_party/go.tools/go/loader"
+ "llvm.org/llgo/third_party/go.tools/go/ssa"
+)
+
+// findInterval parses input and returns the [start, end) positions of
+// the first occurrence of substr in input. f==nil indicates failure;
+// an error has already been reported in that case.
+//
+func findInterval(t *testing.T, fset *token.FileSet, input, substr string) (f *ast.File, start, end token.Pos) {
+ f, err := parser.ParseFile(fset, " ", input, 0)
+ if err != nil {
+ t.Errorf("parse error: %s", err)
+ return
+ }
+
+ i := strings.Index(input, substr)
+ if i < 0 {
+ t.Errorf("%q is not a substring of input", substr)
+ f = nil
+ return
+ }
+
+ filePos := fset.File(f.Package)
+ return f, filePos.Pos(i), filePos.Pos(i + len(substr))
+}
+
+func TestEnclosingFunction(t *testing.T) {
+ tests := []struct {
+ input string // the input file
+ substr string // first occurrence of this string denotes interval
+ fn string // name of expected containing function
+ }{
+ // We use distinctive numbers as syntactic landmarks.
+
+ // Ordinary function:
+ {`package main
+ func f() { println(1003) }`,
+ "100", "main.f"},
+ // Methods:
+ {`package main
+ type T int
+ func (t T) f() { println(200) }`,
+ "200", "(main.T).f"},
+ // Function literal:
+ {`package main
+ func f() { println(func() { print(300) }) }`,
+ "300", "main.f$1"},
+ // Doubly nested
+ {`package main
+ func f() { println(func() { print(func() { print(350) })})}`,
+ "350", "main.f$1$1"},
+ // Implicit init for package-level var initializer.
+ {"package main; var a = 400", "400", "main.init"},
+ // No code for constants:
+ {"package main; const a = 500", "500", "(none)"},
+ // Explicit init()
+ {"package main; func init() { println(600) }", "600", "main.init#1"},
+ // Multiple explicit init functions:
+ {`package main
+ func init() { println("foo") }
+ func init() { println(800) }`,
+ "800", "main.init#2"},
+ // init() containing FuncLit.
+ {`package main
+ func init() { println(func(){print(900)}) }`,
+ "900", "main.init#1$1"},
+ }
+ for _, test := range tests {
+ conf := loader.Config{Fset: token.NewFileSet()}
+ f, start, end := findInterval(t, conf.Fset, test.input, test.substr)
+ if f == nil {
+ continue
+ }
+ path, exact := astutil.PathEnclosingInterval(f, start, end)
+ if !exact {
+ t.Errorf("EnclosingFunction(%q) not exact", test.substr)
+ continue
+ }
+
+ conf.CreateFromFiles("main", f)
+
+ iprog, err := conf.Load()
+ if err != nil {
+ t.Error(err)
+ continue
+ }
+ prog := ssa.Create(iprog, 0)
+ pkg := prog.Package(iprog.Created[0].Pkg)
+ pkg.Build()
+
+ name := "(none)"
+ fn := ssa.EnclosingFunction(pkg, path)
+ if fn != nil {
+ name = fn.String()
+ }
+
+ if name != test.fn {
+ t.Errorf("EnclosingFunction(%q in %q) got %s, want %s",
+ test.substr, test.input, name, test.fn)
+ continue
+ }
+
+ // While we're here: test HasEnclosingFunction.
+ if has := ssa.HasEnclosingFunction(pkg, path); has != (fn != nil) {
+ t.Errorf("HasEnclosingFunction(%q in %q) got %v, want %v",
+ test.substr, test.input, has, fn != nil)
+ continue
+ }
+ }
+}
diff --git a/llgo/third_party/go.tools/go/loader/stdlib_test.go b/llgo/third_party/go.tools/go/loader/stdlib_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..f79748850c1d856873bc1a41e7b6d357db685bb0
--- /dev/null
+++ b/llgo/third_party/go.tools/go/loader/stdlib_test.go
@@ -0,0 +1,193 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package loader_test
+
+// This file enumerates all packages beneath $GOROOT, loads them, plus
+// their external tests if any, runs the type checker on them, and
+// prints some summary information.
+//
+// Run test with GOMAXPROCS=8.
+
+import (
+ "bytes"
+ "fmt"
+ "go/ast"
+ "go/build"
+ "go/token"
+ "io/ioutil"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "testing"
+ "time"
+
+ "llvm.org/llgo/third_party/go.tools/go/buildutil"
+ "llvm.org/llgo/third_party/go.tools/go/loader"
+ "llvm.org/llgo/third_party/go.tools/go/types"
+)
+
+func TestStdlib(t *testing.T) {
+ runtime.GC()
+ t0 := time.Now()
+ var memstats runtime.MemStats
+ runtime.ReadMemStats(&memstats)
+ alloc := memstats.Alloc
+
+ // Load, parse and type-check the program.
+ ctxt := build.Default // copy
+ ctxt.GOPATH = "" // disable GOPATH
+ conf := loader.Config{Build: &ctxt}
+ for _, path := range buildutil.AllPackages(conf.Build) {
+ if err := conf.ImportWithTests(path); err != nil {
+ t.Error(err)
+ }
+ }
+
+ prog, err := conf.Load()
+ if err != nil {
+ t.Fatalf("Load failed: %v", err)
+ }
+
+ t1 := time.Now()
+ runtime.GC()
+ runtime.ReadMemStats(&memstats)
+
+ numPkgs := len(prog.AllPackages)
+ if want := 205; numPkgs < want {
+ t.Errorf("Loaded only %d packages, want at least %d", numPkgs, want)
+ }
+
+ // Dump package members.
+ if false {
+ for pkg := range prog.AllPackages {
+ fmt.Printf("Package %s:\n", pkg.Path())
+ scope := pkg.Scope()
+ for _, name := range scope.Names() {
+ if ast.IsExported(name) {
+ fmt.Printf("\t%s\n", types.ObjectString(pkg, scope.Lookup(name)))
+ }
+ }
+ fmt.Println()
+ }
+ }
+
+ // Check that Test functions for io/ioutil, regexp and
+ // compress/bzip2 are all simultaneously present.
+ // (The apparent cycle formed when augmenting all three of
+ // these packages by their tests was the original motivation
+ // for reporting b/7114.)
+ //
+ // compress/bzip2.TestBitReader in bzip2_test.go imports io/ioutil
+ // io/ioutil.TestTempFile in tempfile_test.go imports regexp
+ // regexp.TestRE2Search in exec_test.go imports compress/bzip2
+ for _, test := range []struct{ pkg, fn string }{
+ {"io/ioutil", "TestTempFile"},
+ {"regexp", "TestRE2Search"},
+ {"compress/bzip2", "TestBitReader"},
+ } {
+ info := prog.Imported[test.pkg]
+ if info == nil {
+ t.Errorf("failed to load package %q", test.pkg)
+ continue
+ }
+ obj, _ := info.Pkg.Scope().Lookup(test.fn).(*types.Func)
+ if obj == nil {
+ t.Errorf("package %q has no func %q", test.pkg, test.fn)
+ continue
+ }
+ }
+
+ // Dump some statistics.
+
+ // determine line count
+ var lineCount int
+ prog.Fset.Iterate(func(f *token.File) bool {
+ lineCount += f.LineCount()
+ return true
+ })
+
+ t.Log("GOMAXPROCS: ", runtime.GOMAXPROCS(0))
+ t.Log("#Source lines: ", lineCount)
+ t.Log("Load/parse/typecheck: ", t1.Sub(t0))
+ t.Log("#MB: ", int64(memstats.Alloc-alloc)/1000000)
+}
+
+func TestCgoOption(t *testing.T) {
+ switch runtime.GOOS {
+ // On these systems, the net and os/user packages don't use cgo.
+ case "plan9", "solaris", "windows":
+ return
+ }
+ // In nocgo builds (e.g. linux-amd64-nocgo),
+ // there is no "runtime/cgo" package,
+ // so cgo-generated Go files will have a failing import.
+ if !build.Default.CgoEnabled {
+ return
+ }
+ // Test that we can load cgo-using packages with
+ // CGO_ENABLED=[01], which causes go/build to select pure
+ // Go/native implementations, respectively, based on build
+ // tags.
+ //
+ // Each entry specifies a package-level object and the generic
+ // file expected to define it when cgo is disabled.
+ // When cgo is enabled, the exact file is not specified (since
+ // it varies by platform), but must differ from the generic one.
+ //
+ // The test also loads the actual file to verify that the
+ // object is indeed defined at that location.
+ for _, test := range []struct {
+ pkg, name, genericFile string
+ }{
+ {"net", "cgoLookupHost", "cgo_stub.go"},
+ {"os/user", "lookupId", "lookup_stubs.go"},
+ } {
+ ctxt := build.Default
+ for _, ctxt.CgoEnabled = range []bool{false, true} {
+ conf := loader.Config{Build: &ctxt}
+ conf.Import(test.pkg)
+ prog, err := conf.Load()
+ if err != nil {
+ t.Errorf("Load failed: %v", err)
+ continue
+ }
+ info := prog.Imported[test.pkg]
+ if info == nil {
+ t.Errorf("package %s not found", test.pkg)
+ continue
+ }
+ obj := info.Pkg.Scope().Lookup(test.name)
+ if obj == nil {
+ t.Errorf("no object %s.%s", test.pkg, test.name)
+ continue
+ }
+ posn := prog.Fset.Position(obj.Pos())
+ t.Logf("%s: %s (CgoEnabled=%t)", posn, obj, ctxt.CgoEnabled)
+
+ gotFile := filepath.Base(posn.Filename)
+ filesMatch := gotFile == test.genericFile
+
+ if ctxt.CgoEnabled && filesMatch {
+ t.Errorf("CGO_ENABLED=1: %s found in %s, want native file",
+ obj, gotFile)
+ } else if !ctxt.CgoEnabled && !filesMatch {
+ t.Errorf("CGO_ENABLED=0: %s found in %s, want %s",
+ obj, gotFile, test.genericFile)
+ }
+
+ // Load the file and check the object is declared at the right place.
+ b, err := ioutil.ReadFile(posn.Filename)
+ if err != nil {
+ t.Errorf("can't read %s: %s", posn.Filename, err)
+ continue
+ }
+ line := string(bytes.Split(b, []byte("\n"))[posn.Line-1])
+ ident := line[posn.Column-1:]
+ if !strings.HasPrefix(ident, test.name) {
+ t.Errorf("%s: %s not declared here (looking at %q)", posn, obj, ident)
+ }
+ }
+ }
+}
diff --git a/llgo/third_party/go.tools/go/loader/testdata/a.go b/llgo/third_party/go.tools/go/loader/testdata/a.go
new file mode 100644
index 0000000000000000000000000000000000000000..bae395508837b68b49e0b350e8e95c003e9f65d0
--- /dev/null
+++ b/llgo/third_party/go.tools/go/loader/testdata/a.go
@@ -0,0 +1 @@
+package P
diff --git a/llgo/third_party/go.tools/go/loader/testdata/b.go b/llgo/third_party/go.tools/go/loader/testdata/b.go
new file mode 100644
index 0000000000000000000000000000000000000000..bae395508837b68b49e0b350e8e95c003e9f65d0
--- /dev/null
+++ b/llgo/third_party/go.tools/go/loader/testdata/b.go
@@ -0,0 +1 @@
+package P
diff --git a/llgo/third_party/go.tools/go/loader/util.go b/llgo/third_party/go.tools/go/loader/util.go
new file mode 100644
index 0000000000000000000000000000000000000000..467a74ceeb8b0ac6db27dd37f0508e03fbd2c003
--- /dev/null
+++ b/llgo/third_party/go.tools/go/loader/util.go
@@ -0,0 +1,96 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package loader
+
+import (
+ "go/ast"
+ "go/build"
+ "go/parser"
+ "go/token"
+ "io"
+ "os"
+ "path/filepath"
+ "sync"
+)
+
+// parseFiles parses the Go source files within directory dir and
+// returns the ASTs of the ones that could be at least partially parsed,
+// along with a list of I/O and parse errors encountered.
+//
+// I/O is done via ctxt, which may specify a virtual file system.
+// displayPath is used to transform the filenames attached to the ASTs.
+//
+func parseFiles(fset *token.FileSet, ctxt *build.Context, displayPath func(string) string, dir string, files []string, mode parser.Mode) ([]*ast.File, []error) {
+ if displayPath == nil {
+ displayPath = func(path string) string { return path }
+ }
+ isAbs := filepath.IsAbs
+ if ctxt.IsAbsPath != nil {
+ isAbs = ctxt.IsAbsPath
+ }
+ joinPath := filepath.Join
+ if ctxt.JoinPath != nil {
+ joinPath = ctxt.JoinPath
+ }
+ var wg sync.WaitGroup
+ n := len(files)
+ parsed := make([]*ast.File, n)
+ errors := make([]error, n)
+ for i, file := range files {
+ if !isAbs(file) {
+ file = joinPath(dir, file)
+ }
+ wg.Add(1)
+ go func(i int, file string) {
+ defer wg.Done()
+ var rd io.ReadCloser
+ var err error
+ if ctxt.OpenFile != nil {
+ rd, err = ctxt.OpenFile(file)
+ } else {
+ rd, err = os.Open(file)
+ }
+ if err != nil {
+ errors[i] = err // open failed
+ return
+ }
+
+ // ParseFile may return both an AST and an error.
+ parsed[i], errors[i] = parser.ParseFile(fset, displayPath(file), rd, mode)
+ rd.Close()
+ }(i, file)
+ }
+ wg.Wait()
+
+ // Eliminate nils, preserving order.
+ var o int
+ for _, f := range parsed {
+ if f != nil {
+ parsed[o] = f
+ o++
+ }
+ }
+ parsed = parsed[:o]
+
+ o = 0
+ for _, err := range errors {
+ if err != nil {
+ errors[o] = err
+ o++
+ }
+ }
+ errors = errors[:o]
+
+ return parsed, errors
+}
+
+// ---------- Internal helpers ----------
+
+// TODO(adonovan): make this a method: func (*token.File) Contains(token.Pos)
+func tokenFileContainsPos(f *token.File, pos token.Pos) bool {
+ p := int(pos)
+ base := f.Base()
+ return base <= p && p < base+f.Size()
+}
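
Editor's note: parseFiles above separates the path used for I/O (via ctxt) from the path recorded in the FileSet (via displayPath). A hedged sketch of a call site inside package loader, using a hypothetical transform that records GOROOT-relative names (the directory and file names are illustrative only, not part of this patch):

    fset := token.NewFileSet()
    ctxt := &build.Default

    // Hypothetical display transform: record paths relative to GOROOT.
    display := func(path string) string {
        if rel, err := filepath.Rel(ctxt.GOROOT, path); err == nil {
            return rel
        }
        return path
    }

    asts, errs := parseFiles(fset, ctxt, display, "/usr/local/go/src/fmt",
        []string{"print.go", "scan.go"}, parser.ParseComments)
    for _, err := range errs {
        log.Println(err) // I/O or parse error
    }
    _ = asts

The display transform affects only the filenames attached to the ASTs; opening the files still goes through ctxt.OpenFile (or os.Open) with the joined path.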
diff --git a/llgo/third_party/go.tools/go/pointer/TODO b/llgo/third_party/go.tools/go/pointer/TODO
new file mode 100644
index 0000000000000000000000000000000000000000..f95e70621d94bd1ab12a1c9a04af29a79a6972fa
--- /dev/null
+++ b/llgo/third_party/go.tools/go/pointer/TODO
@@ -0,0 +1,33 @@
+-*- text -*-
+
+Pointer analysis to-do list
+===========================
+
+CONSTRAINT GENERATION:
+- support reflection:
+ - a couple of operators are missing
+ - reflect.Values may contain lvalues (CanAddr)
+- implement native intrinsics. These vary by platform.
+- add to pts(a.panic) a label representing all runtime panics, e.g.
+ runtime.{TypeAssertionError,errorString,errorCString}.
+
+OPTIMISATIONS
+- pre-solver:
+ pointer equivalence: extend HVN to HRU
+ location equivalence
+- solver: HCD, LCD.
+- experiment with map+slice worklist in lieu of bitset.
+ It may have faster insert.
+
+MISC:
+- Test on all platforms.
+ Currently we assume these go/build tags: linux, amd64, !cgo.
+
+MAINTAINABILITY
+- Think about ways to make debugging this code easier. PTA logs
+ routinely exceed a million lines and require training to read.
+
+BUGS:
+- There's a crash bug in stdlib_test + reflection, rVCallConstraint.
+
+
diff --git a/llgo/third_party/go.tools/go/pointer/analysis.go b/llgo/third_party/go.tools/go/pointer/analysis.go
new file mode 100644
index 0000000000000000000000000000000000000000..a89b39fd609e1c86f7ddd21d820eac81fa376f10
--- /dev/null
+++ b/llgo/third_party/go.tools/go/pointer/analysis.go
@@ -0,0 +1,447 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pointer
+
+// This file defines the main datatypes and Analyze function of the pointer analysis.
+
+import (
+ "fmt"
+ "go/token"
+ "io"
+ "os"
+ "reflect"
+ "runtime"
+ "runtime/debug"
+ "sort"
+
+ "llvm.org/llgo/third_party/go.tools/go/callgraph"
+ "llvm.org/llgo/third_party/go.tools/go/ssa"
+ "llvm.org/llgo/third_party/go.tools/go/types"
+ "llvm.org/llgo/third_party/go.tools/go/types/typeutil"
+)
+
+const (
+ // optimization options; enable all when committing
+ optRenumber = true // enable renumbering optimization (makes logs hard to read)
+ optHVN = true // enable pointer equivalence via Hash-Value Numbering
+
+ // debugging options; disable all when committing
+ debugHVN = false // enable assertions in HVN
+ debugHVNVerbose = false // enable extra HVN logging
+ debugHVNCrossCheck = false // run solver with/without HVN and compare (caveats below)
+ debugTimers = false // show running time of each phase
+)
+
+// object.flags bitmask values.
+const (
+ otTagged = 1 << iota // type-tagged object
+ otIndirect // type-tagged object with indirect payload
+ otFunction // function object
+)
+
+// An object represents a contiguous block of memory to which some
+// (generalized) pointer may point.
+//
+// (Note: most variables called 'obj' are not *objects but nodeids
+// such that a.nodes[obj].obj != nil.)
+//
+type object struct {
+ // flags is a bitset of the node type (ot*) flags defined above.
+ flags uint32
+
+ // Number of following nodes belonging to the same "object"
+ // allocation. Zero for all other nodes.
+ size uint32
+
+ // data describes this object; it has one of these types:
+ //
+ // ssa.Value for an object allocated by an SSA operation.
+ // types.Type for an rtype instance object or *rtype-tagged object.
+ // string for an intrinsic object, e.g. the array behind os.Args.
+ // nil for an object allocated by an intrinsic.
+ // (cgn provides the identity of the intrinsic.)
+ data interface{}
+
+ // The call-graph node (=context) in which this object was allocated.
+ // May be nil for global objects: Global, Const, some Functions.
+ cgn *cgnode
+}
+
+// nodeid denotes a node.
+// It is an index within analysis.nodes.
+// We use small integers, not *node pointers, for many reasons:
+// - they are smaller on 64-bit systems.
+// - sets of them can be represented compactly in bitvectors or BDDs.
+// - order matters; a field offset can be computed by simple addition.
+type nodeid uint32
+
+// A node is an equivalence class of memory locations.
+// Nodes may be pointers, pointed-to locations, neither, or both.
+//
+// Nodes that are pointed-to locations ("labels") have an enclosing
+// object (see analysis.enclosingObject).
+//
+type node struct {
+ // If non-nil, this node is the start of an object
+ // (addressable memory location).
+ // The following obj.size nodes implicitly belong to the object;
+ // they locate their object by scanning back.
+ obj *object
+
+ // The type of the field denoted by this node. Non-aggregate,
+ // unless this is a tagged.T node (i.e. the thing
+ // pointed to by an interface) in which case typ is that type.
+ typ types.Type
+
+ // subelement indicates which directly embedded subelement of
+ // an object of aggregate type (struct, tuple, array) this is.
+ subelement *fieldInfo // e.g. ".a.b[*].c"
+
+ // Solver state for the canonical node of this pointer-
+ // equivalence class. Each node is created with its own state
+ // but they become shared after HVN.
+ solve *solverState
+}
+
+// An analysis instance holds the state of a single pointer analysis problem.
+type analysis struct {
+ config *Config // the client's control/observer interface
+ prog *ssa.Program // the program being analyzed
+ log io.Writer // log stream; nil to disable
+ panicNode nodeid // sink for panic, source for recover
+ nodes []*node // indexed by nodeid
+ flattenMemo map[types.Type][]*fieldInfo // memoization of flatten()
+ trackTypes map[types.Type]bool // memoization of shouldTrack()
+ constraints []constraint // set of constraints
+ cgnodes []*cgnode // all cgnodes
+ genq []*cgnode // queue of functions to generate constraints for
+ intrinsics map[*ssa.Function]intrinsic // non-nil values are summaries for intrinsic fns
+ globalval map[ssa.Value]nodeid // node for each global ssa.Value
+ globalobj map[ssa.Value]nodeid // maps v to sole member of pts(v), if singleton
+ localval map[ssa.Value]nodeid // node for each local ssa.Value
+ localobj map[ssa.Value]nodeid // maps v to sole member of pts(v), if singleton
+ atFuncs map[*ssa.Function]bool // address-taken functions (for presolver)
+ mapValues []nodeid // values of makemap objects (indirect in HVN)
+ work nodeset // solver's worklist
+ result *Result // results of the analysis
+ track track // pointerlike types whose aliasing we track
+ deltaSpace []int // working space for iterating over PTS deltas
+
+ // Reflection & intrinsics:
+ hasher typeutil.Hasher // cache of type hashes
+ reflectValueObj types.Object // type symbol for reflect.Value (if present)
+ reflectValueCall *ssa.Function // (reflect.Value).Call
+ reflectRtypeObj types.Object // *types.TypeName for reflect.rtype (if present)
+ reflectRtypePtr *types.Pointer // *reflect.rtype
+ reflectType *types.Named // reflect.Type
+ rtypes typeutil.Map // nodeid of canonical *rtype-tagged object for type T
+ reflectZeros typeutil.Map // nodeid of canonical T-tagged object for zero value
+ runtimeSetFinalizer *ssa.Function // runtime.SetFinalizer
+}
+
+// enclosingObj returns the first node of the addressable memory
+// object that encloses node id. Panic ensues if that node does not
+// belong to any object.
+func (a *analysis) enclosingObj(id nodeid) nodeid {
+ // Find previous node with obj != nil.
+ for i := id; i >= 0; i-- {
+ n := a.nodes[i]
+ if obj := n.obj; obj != nil {
+ if i+nodeid(obj.size) <= id {
+ break // out of bounds
+ }
+ return i
+ }
+ }
+ panic("node has no enclosing object")
+}
+
+// labelFor returns the Label for node id.
+// Panic ensues if that node is not addressable.
+func (a *analysis) labelFor(id nodeid) *Label {
+ return &Label{
+ obj: a.nodes[a.enclosingObj(id)].obj,
+ subelement: a.nodes[id].subelement,
+ }
+}
+
+func (a *analysis) warnf(pos token.Pos, format string, args ...interface{}) {
+ msg := fmt.Sprintf(format, args...)
+ if a.log != nil {
+ fmt.Fprintf(a.log, "%s: warning: %s\n", a.prog.Fset.Position(pos), msg)
+ }
+ a.result.Warnings = append(a.result.Warnings, Warning{pos, msg})
+}
+
+// computeTrackBits sets a.track to the necessary 'track' bits for the pointer queries.
+func (a *analysis) computeTrackBits() {
+ var queryTypes []types.Type
+ for v := range a.config.Queries {
+ queryTypes = append(queryTypes, v.Type())
+ }
+ for v := range a.config.IndirectQueries {
+ queryTypes = append(queryTypes, mustDeref(v.Type()))
+ }
+ for _, t := range queryTypes {
+ switch t.Underlying().(type) {
+ case *types.Chan:
+ a.track |= trackChan
+ case *types.Map:
+ a.track |= trackMap
+ case *types.Pointer:
+ a.track |= trackPtr
+ case *types.Slice:
+ a.track |= trackSlice
+ case *types.Interface:
+ a.track = trackAll
+ return
+ }
+ if rVObj := a.reflectValueObj; rVObj != nil && types.Identical(t, rVObj.Type()) {
+ a.track = trackAll
+ return
+ }
+ }
+}
+
+// Analyze runs the pointer analysis with the scope and options
+// specified by config, and returns the (synthetic) root of the callgraph.
+//
+// Pointer analysis of a transitively closed well-typed program should
+// always succeed. An error can occur only due to an internal bug.
+//
+func Analyze(config *Config) (result *Result, err error) {
+ if config.Mains == nil {
+ return nil, fmt.Errorf("no main/test packages to analyze (check $GOROOT/$GOPATH)")
+ }
+ defer func() {
+ if p := recover(); p != nil {
+ err = fmt.Errorf("internal error in pointer analysis: %v (please report this bug)", p)
+ fmt.Fprintln(os.Stderr, "Internal panic in pointer analysis:")
+ debug.PrintStack()
+ }
+ }()
+
+ a := &analysis{
+ config: config,
+ log: config.Log,
+ prog: config.prog(),
+ globalval: make(map[ssa.Value]nodeid),
+ globalobj: make(map[ssa.Value]nodeid),
+ flattenMemo: make(map[types.Type][]*fieldInfo),
+ trackTypes: make(map[types.Type]bool),
+ atFuncs: make(map[*ssa.Function]bool),
+ hasher: typeutil.MakeHasher(),
+ intrinsics: make(map[*ssa.Function]intrinsic),
+ result: &Result{
+ Queries: make(map[ssa.Value]Pointer),
+ IndirectQueries: make(map[ssa.Value]Pointer),
+ },
+ deltaSpace: make([]int, 0, 100),
+ }
+
+ if false {
+ a.log = os.Stderr // for debugging crashes; extremely verbose
+ }
+
+ if a.log != nil {
+ fmt.Fprintln(a.log, "==== Starting analysis")
+ }
+
+ // Pointer analysis requires a complete program for soundness.
+ // Check to prevent accidental misconfiguration.
+ for _, pkg := range a.prog.AllPackages() {
+ // (This only checks that the package scope is complete,
+ // not that func bodies exist, but it's a good signal.)
+ if !pkg.Object.Complete() {
+ return nil, fmt.Errorf(`pointer analysis requires a complete program yet package %q was incomplete (set loader.Config.SourceImports during loading)`, pkg.Object.Path())
+ }
+ }
+
+ if reflect := a.prog.ImportedPackage("reflect"); reflect != nil {
+ rV := reflect.Object.Scope().Lookup("Value")
+ a.reflectValueObj = rV
+ a.reflectValueCall = a.prog.LookupMethod(rV.Type(), nil, "Call")
+ a.reflectType = reflect.Object.Scope().Lookup("Type").Type().(*types.Named)
+ a.reflectRtypeObj = reflect.Object.Scope().Lookup("rtype")
+ a.reflectRtypePtr = types.NewPointer(a.reflectRtypeObj.Type())
+
+ // Override flattening of reflect.Value, treating it like a basic type.
+ tReflectValue := a.reflectValueObj.Type()
+ a.flattenMemo[tReflectValue] = []*fieldInfo{{typ: tReflectValue}}
+
+ // Override shouldTrack of reflect.Value and *reflect.rtype.
+ // Always track pointers of these types.
+ a.trackTypes[tReflectValue] = true
+ a.trackTypes[a.reflectRtypePtr] = true
+
+ a.rtypes.SetHasher(a.hasher)
+ a.reflectZeros.SetHasher(a.hasher)
+ }
+ if runtime := a.prog.ImportedPackage("runtime"); runtime != nil {
+ a.runtimeSetFinalizer = runtime.Func("SetFinalizer")
+ }
+ a.computeTrackBits()
+
+ a.generate()
+ a.showCounts()
+
+ if optRenumber {
+ a.renumber()
+ }
+
+ N := len(a.nodes) // excludes solver-created nodes
+
+ if optHVN {
+ if debugHVNCrossCheck {
+ // Cross-check: run the solver once without
+ // optimization, once with, and compare the
+ // solutions.
+ savedConstraints := a.constraints
+
+ a.solve()
+ a.dumpSolution("A.pts", N)
+
+ // Restore.
+ a.constraints = savedConstraints
+ for _, n := range a.nodes {
+ n.solve = new(solverState)
+ }
+ a.nodes = a.nodes[:N]
+
+ // rtypes is effectively part of the solver state.
+ a.rtypes = typeutil.Map{}
+ a.rtypes.SetHasher(a.hasher)
+ }
+
+ a.hvn()
+ }
+
+ if debugHVNCrossCheck {
+ runtime.GC()
+ runtime.GC()
+ }
+
+ a.solve()
+
+ // Compare solutions.
+ if optHVN && debugHVNCrossCheck {
+ a.dumpSolution("B.pts", N)
+
+ if !diff("A.pts", "B.pts") {
+ return nil, fmt.Errorf("internal error: optimization changed solution")
+ }
+ }
+
+ // Create callgraph.Nodes in deterministic order.
+ if cg := a.result.CallGraph; cg != nil {
+ for _, caller := range a.cgnodes {
+ cg.CreateNode(caller.fn)
+ }
+ }
+
+ // Add dynamic edges to call graph.
+ var space [100]int
+ for _, caller := range a.cgnodes {
+ for _, site := range caller.sites {
+ for _, callee := range a.nodes[site.targets].solve.pts.AppendTo(space[:0]) {
+ a.callEdge(caller, site, nodeid(callee))
+ }
+ }
+ }
+
+ return a.result, nil
+}
+
+// callEdge is called for each edge in the callgraph.
+// calleeid is the callee's object node (has otFunction flag).
+//
+func (a *analysis) callEdge(caller *cgnode, site *callsite, calleeid nodeid) {
+ obj := a.nodes[calleeid].obj
+ if obj.flags&otFunction == 0 {
+ panic(fmt.Sprintf("callEdge %s -> n%d: not a function object", site, calleeid))
+ }
+ callee := obj.cgn
+
+ if cg := a.result.CallGraph; cg != nil {
+ // TODO(adonovan): opt: I would expect duplicate edges
+ // (to wrappers) to arise due to the elimination of
+ // context information, but I haven't observed any.
+ // Understand this better.
+ callgraph.AddEdge(cg.CreateNode(caller.fn), site.instr, cg.CreateNode(callee.fn))
+ }
+
+ if a.log != nil {
+ fmt.Fprintf(a.log, "\tcall edge %s -> %s\n", site, callee)
+ }
+
+ // Warn about calls to non-intrinsic external functions.
+ // TODO(adonovan): de-dup these messages.
+ if fn := callee.fn; fn.Blocks == nil && a.findIntrinsic(fn) == nil {
+ a.warnf(site.pos(), "unsound call to unknown intrinsic: %s", fn)
+ a.warnf(fn.Pos(), " (declared here)")
+ }
+}
+
+// dumpSolution writes the PTS solution to the specified file.
+//
+// It only dumps the nodes that existed before solving. The order in
+// which solver-created nodes are created depends on pre-solver
+// optimization, so we can't include them in the cross-check.
+//
+func (a *analysis) dumpSolution(filename string, N int) {
+ f, err := os.Create(filename)
+ if err != nil {
+ panic(err)
+ }
+ for id, n := range a.nodes[:N] {
+ if _, err := fmt.Fprintf(f, "pts(n%d) = {", id); err != nil {
+ panic(err)
+ }
+ var sep string
+ for _, l := range n.solve.pts.AppendTo(a.deltaSpace) {
+ if l >= N {
+ break
+ }
+ fmt.Fprintf(f, "%s%d", sep, l)
+ sep = " "
+ }
+ fmt.Fprintf(f, "} : %s\n", n.typ)
+ }
+ if err := f.Close(); err != nil {
+ panic(err)
+ }
+}
+
+// showCounts logs the size of the constraint system. A typical
+// optimized distribution is 65% copy, 13% load, 11% addr, 5%
+// offsetAddr, 4% store, 2% others.
+//
+func (a *analysis) showCounts() {
+ if a.log != nil {
+ counts := make(map[reflect.Type]int)
+ for _, c := range a.constraints {
+ counts[reflect.TypeOf(c)]++
+ }
+ fmt.Fprintf(a.log, "# constraints:\t%d\n", len(a.constraints))
+ var lines []string
+ for t, n := range counts {
+ line := fmt.Sprintf("%7d (%2d%%)\t%s", n, 100*n/len(a.constraints), t)
+ lines = append(lines, line)
+ }
+ sort.Sort(sort.Reverse(sort.StringSlice(lines)))
+ for _, line := range lines {
+ fmt.Fprintf(a.log, "\t%s\n", line)
+ }
+
+ fmt.Fprintf(a.log, "# nodes:\t%d\n", len(a.nodes))
+
+ // Show number of pointer equivalence classes.
+ m := make(map[*solverState]bool)
+ for _, n := range a.nodes {
+ m[n.solve] = true
+ }
+ fmt.Fprintf(a.log, "# ptsets:\t%d\n", len(m))
+ }
+}
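
Editor's note: warnings produced by warnf above (for example, the "unsound call to unknown intrinsic" messages emitted from callEdge) accumulate in Result.Warnings rather than aborting the analysis. A minimal sketch of how a caller might report them, assuming config and prog were set up as in the example_test.go added later in this patch:

    result, err := pointer.Analyze(config)
    if err != nil {
        log.Fatal(err) // internal error in the pointer analysis
    }
    for _, w := range result.Warnings {
        fmt.Printf("%s: warning: %s\n", prog.Fset.Position(w.Pos), w.Message)
    }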
diff --git a/llgo/third_party/go.tools/go/pointer/api.go b/llgo/third_party/go.tools/go/pointer/api.go
new file mode 100644
index 0000000000000000000000000000000000000000..cd46762effa7f7aad265e43fe07d2975885843b9
--- /dev/null
+++ b/llgo/third_party/go.tools/go/pointer/api.go
@@ -0,0 +1,239 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pointer
+
+import (
+ "bytes"
+ "fmt"
+ "go/token"
+ "io"
+
+ "llvm.org/llgo/third_party/go.tools/container/intsets"
+ "llvm.org/llgo/third_party/go.tools/go/callgraph"
+ "llvm.org/llgo/third_party/go.tools/go/ssa"
+ "llvm.org/llgo/third_party/go.tools/go/types/typeutil"
+)
+
+// A Config formulates a pointer analysis problem for Analyze().
+type Config struct {
+ // Mains contains the set of 'main' packages to analyze.
+ // Clients must provide the analysis with at least one
+ // package defining a main() function.
+ Mains []*ssa.Package
+
+ // Reflection determines whether to handle reflection
+ // operators soundly, which is currently rather slow since it
+ // causes constraints to be generated during solving
+ // in proportion to the number of constraint variables, which
+ // has not yet been reduced by presolver optimisation.
+ Reflection bool
+
+ // BuildCallGraph determines whether to construct a callgraph.
+ // If enabled, the graph will be available in Result.CallGraph.
+ BuildCallGraph bool
+
+ // The client populates Queries[v] or IndirectQueries[v]
+ // for each ssa.Value v of interest, to request that the
+ // points-to sets pts(v) or pts(*v) be computed. If the
+ // client needs both points-to sets, v may appear in both
+ // maps.
+ //
+ // (IndirectQueries is typically used for Values corresponding
+ // to source-level lvalues, e.g. an *ssa.Global.)
+ //
+ // The analysis populates the corresponding
+ // Result.{Indirect,}Queries map when it creates the pointer
+ // variable for v or *v. Upon completion the client can
+ // inspect that map for the results.
+ //
+ // TODO(adonovan): this API doesn't scale well for batch tools
+ // that want to dump the entire solution. Perhaps optionally
+ // populate a map[*ssa.DebugRef]Pointer in the Result, one
+ // entry per source expression.
+ //
+ Queries map[ssa.Value]struct{}
+ IndirectQueries map[ssa.Value]struct{}
+
+ // If Log is non-nil, log messages are written to it.
+ // Logging is extremely verbose.
+ Log io.Writer
+}
+
+type track uint32
+
+const (
+ trackChan track = 1 << iota // track 'chan' references
+ trackMap // track 'map' references
+ trackPtr // track regular pointers
+ trackSlice // track slice references
+
+ trackAll = ^track(0)
+)
+
+// AddQuery adds v to Config.Queries.
+// Precondition: CanPoint(v.Type()).
+// TODO(adonovan): consider returning a new Pointer for this query,
+// which will be initialized during analysis. That avoids the needs
+// for the corresponding ssa.Value-keyed maps in Config and Result.
+func (c *Config) AddQuery(v ssa.Value) {
+ if !CanPoint(v.Type()) {
+ panic(fmt.Sprintf("%s is not a pointer-like value: %s", v, v.Type()))
+ }
+ if c.Queries == nil {
+ c.Queries = make(map[ssa.Value]struct{})
+ }
+ c.Queries[v] = struct{}{}
+}
+
+// AddIndirectQuery adds v to Config.IndirectQueries.
+// Precondition: CanPoint(v.Type().Underlying().(*types.Pointer).Elem()).
+func (c *Config) AddIndirectQuery(v ssa.Value) {
+ if c.IndirectQueries == nil {
+ c.IndirectQueries = make(map[ssa.Value]struct{})
+ }
+ if !CanPoint(mustDeref(v.Type())) {
+ panic(fmt.Sprintf("%s is not the address of a pointer-like value: %s", v, v.Type()))
+ }
+ c.IndirectQueries[v] = struct{}{}
+}
+
+func (c *Config) prog() *ssa.Program {
+ for _, main := range c.Mains {
+ return main.Prog
+ }
+ panic("empty scope")
+}
+
+type Warning struct {
+ Pos token.Pos
+ Message string
+}
+
+// A Result contains the results of a pointer analysis.
+//
+// See Config for how to request the various Result components.
+//
+type Result struct {
+ CallGraph *callgraph.Graph // discovered call graph
+ Queries map[ssa.Value]Pointer // pts(v) for each v in Config.Queries.
+ IndirectQueries map[ssa.Value]Pointer // pts(*v) for each v in Config.IndirectQueries.
+ Warnings []Warning // warnings of unsoundness
+}
+
+// A Pointer is an equivalence class of pointer-like values.
+//
+// A Pointer doesn't have a unique type because pointers of distinct
+// types may alias the same object.
+//
+type Pointer struct {
+ a *analysis
+ n nodeid
+}
+
+// A PointsToSet is a set of labels (locations or allocations).
+type PointsToSet struct {
+ a *analysis // may be nil if pts is nil
+ pts *nodeset
+}
+
+func (s PointsToSet) String() string {
+ var buf bytes.Buffer
+ buf.WriteByte('[')
+ if s.pts != nil {
+ var space [50]int
+ for i, l := range s.pts.AppendTo(space[:0]) {
+ if i > 0 {
+ buf.WriteString(", ")
+ }
+ buf.WriteString(s.a.labelFor(nodeid(l)).String())
+ }
+ }
+ buf.WriteByte(']')
+ return buf.String()
+}
+
+// Labels returns the set of labels that this points-to set
+// contains.
+func (s PointsToSet) Labels() []*Label {
+ var labels []*Label
+ if s.pts != nil {
+ var space [50]int
+ for _, l := range s.pts.AppendTo(space[:0]) {
+ labels = append(labels, s.a.labelFor(nodeid(l)))
+ }
+ }
+ return labels
+}
+
+// If this PointsToSet came from a Pointer of interface kind
+// or a reflect.Value, DynamicTypes returns the set of dynamic
+// types that it may contain. (For an interface, they will
+// always be concrete types.)
+//
+// The result is a mapping whose keys are the dynamic types to which
+// it may point. For each pointer-like key type, the corresponding
+// map value is the PointsToSet for pointers of that type.
+//
+// The result is empty unless CanHaveDynamicTypes(T).
+//
+func (s PointsToSet) DynamicTypes() *typeutil.Map {
+ var tmap typeutil.Map
+ tmap.SetHasher(s.a.hasher)
+ if s.pts != nil {
+ var space [50]int
+ for _, x := range s.pts.AppendTo(space[:0]) {
+ ifaceObjId := nodeid(x)
+ if !s.a.isTaggedObject(ifaceObjId) {
+ continue // !CanHaveDynamicTypes(tDyn)
+ }
+ tDyn, v, indirect := s.a.taggedValue(ifaceObjId)
+ if indirect {
+ panic("indirect tagged object") // implement later
+ }
+ pts, ok := tmap.At(tDyn).(PointsToSet)
+ if !ok {
+ pts = PointsToSet{s.a, new(nodeset)}
+ tmap.Set(tDyn, pts)
+ }
+ pts.pts.addAll(&s.a.nodes[v].solve.pts)
+ }
+ }
+ return &tmap
+}
+
+// Intersects reports whether this points-to set and the
+// argument points-to set contain common members.
+func (x PointsToSet) Intersects(y PointsToSet) bool {
+ if x.pts == nil || y.pts == nil {
+ return false
+ }
+ // This takes Θ(|x|+|y|) time.
+ var z intsets.Sparse
+ z.Intersection(&x.pts.Sparse, &y.pts.Sparse)
+ return !z.IsEmpty()
+}
+
+func (p Pointer) String() string {
+ return fmt.Sprintf("n%d", p.n)
+}
+
+// PointsTo returns the points-to set of this pointer.
+func (p Pointer) PointsTo() PointsToSet {
+ if p.n == 0 {
+ return PointsToSet{}
+ }
+ return PointsToSet{p.a, &p.a.nodes[p.n].solve.pts}
+}
+
+// MayAlias reports whether the receiver pointer may alias
+// the argument pointer.
+func (p Pointer) MayAlias(q Pointer) bool {
+ return p.PointsTo().Intersects(q.PointsTo())
+}
+
+// DynamicTypes returns p.PointsTo().DynamicTypes().
+func (p Pointer) DynamicTypes() *typeutil.Map {
+ return p.PointsTo().DynamicTypes()
+}
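
Editor's note: putting the pieces above together, queries are registered on the Config before Analyze runs, and the resulting Pointers can then be compared. A minimal sketch, assuming mainPkg is an *ssa.Package and v1, v2 are pointer-like ssa.Values obtained elsewhere:

    config := &pointer.Config{Mains: []*ssa.Package{mainPkg}}
    config.AddQuery(v1) // panics unless CanPoint(v1.Type())
    config.AddQuery(v2)

    result, err := pointer.Analyze(config)
    if err != nil {
        log.Fatal(err)
    }
    p1, p2 := result.Queries[v1], result.Queries[v2]
    fmt.Println("pts(v1) =", p1.PointsTo())
    fmt.Println("v1 may alias v2:", p1.MayAlias(p2))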
diff --git a/llgo/third_party/go.tools/go/pointer/callgraph.go b/llgo/third_party/go.tools/go/pointer/callgraph.go
new file mode 100644
index 0000000000000000000000000000000000000000..a2bd95e54f71da7c434b752d2fe0dfbad1ddcff3
--- /dev/null
+++ b/llgo/third_party/go.tools/go/pointer/callgraph.go
@@ -0,0 +1,61 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pointer
+
+// This file defines the internal (context-sensitive) call graph.
+
+import (
+ "fmt"
+ "go/token"
+
+ "llvm.org/llgo/third_party/go.tools/go/ssa"
+)
+
+type cgnode struct {
+ fn *ssa.Function
+ obj nodeid // start of this contour's object block
+ sites []*callsite // ordered list of callsites within this function
+ callersite *callsite // where called from, if known; nil for shared contours
+}
+
+// contour returns a description of this node's contour.
+func (n *cgnode) contour() string {
+ if n.callersite == nil {
+ return "shared contour"
+ }
+ if n.callersite.instr != nil {
+ return fmt.Sprintf("as called from %s", n.callersite.instr.Parent())
+ }
+ return fmt.Sprintf("as called from intrinsic (targets=n%d)", n.callersite.targets)
+}
+
+func (n *cgnode) String() string {
+ return fmt.Sprintf("cg%d:%s", n.obj, n.fn)
+}
+
+// A callsite represents a single call site within a cgnode;
+// it is implicitly context-sensitive.
+// callsites never represent calls to built-ins;
+// they are handled as intrinsics.
+//
+type callsite struct {
+ targets nodeid // pts(·) contains objects for dynamically called functions
+ instr ssa.CallInstruction // the call instruction; nil for synthetic/intrinsic
+}
+
+func (c *callsite) String() string {
+ if c.instr != nil {
+ return c.instr.Common().Description()
+ }
+ return "synthetic function call"
+}
+
+// pos returns the source position of this callsite, or token.NoPos if implicit.
+func (c *callsite) pos() token.Pos {
+ if c.instr != nil {
+ return c.instr.Pos()
+ }
+ return token.NoPos
+}
diff --git a/llgo/third_party/go.tools/go/pointer/constraint.go b/llgo/third_party/go.tools/go/pointer/constraint.go
new file mode 100644
index 0000000000000000000000000000000000000000..6b685cb369bee86e3a1d577817309c2bb109bcda
--- /dev/null
+++ b/llgo/third_party/go.tools/go/pointer/constraint.go
@@ -0,0 +1,151 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pointer
+
+import (
+ "llvm.org/llgo/third_party/go.tools/go/types"
+)
+
+type constraint interface {
+ // For a complex constraint, returns the nodeid of the pointer
+ // to which it is attached. For addr and copy, returns dst.
+ ptr() nodeid
+
+ // renumber replaces each nodeid n in the constraint by mapping[n].
+ renumber(mapping []nodeid)
+
+ // presolve is a hook for constraint-specific behaviour during
+ // pre-solver optimization. Typical implementations mark as
+ // indirect the set of nodes to which the solver will add copy
+ // edges or PTS labels.
+ presolve(h *hvn)
+
+ // solve is called for complex constraints when the pts for
+ // the node to which they are attached has changed.
+ solve(a *analysis, delta *nodeset)
+
+ String() string
+}
+
+// dst = &src
+// pts(dst) ⊇ {src}
+// A base constraint used to initialize the solver's pt sets
+type addrConstraint struct {
+ dst nodeid // (ptr)
+ src nodeid
+}
+
+func (c *addrConstraint) ptr() nodeid { return c.dst }
+func (c *addrConstraint) renumber(mapping []nodeid) {
+ c.dst = mapping[c.dst]
+ c.src = mapping[c.src]
+}
+
+// dst = src
+// A simple constraint represented directly as a copyTo graph edge.
+type copyConstraint struct {
+ dst nodeid // (ptr)
+ src nodeid
+}
+
+func (c *copyConstraint) ptr() nodeid { return c.dst }
+func (c *copyConstraint) renumber(mapping []nodeid) {
+ c.dst = mapping[c.dst]
+ c.src = mapping[c.src]
+}
+
+// dst = src[offset]
+// A complex constraint attached to src (the pointer)
+type loadConstraint struct {
+ offset uint32
+ dst nodeid
+ src nodeid // (ptr)
+}
+
+func (c *loadConstraint) ptr() nodeid { return c.src }
+func (c *loadConstraint) renumber(mapping []nodeid) {
+ c.dst = mapping[c.dst]
+ c.src = mapping[c.src]
+}
+
+// dst[offset] = src
+// A complex constraint attached to dst (the pointer)
+type storeConstraint struct {
+ offset uint32
+ dst nodeid // (ptr)
+ src nodeid
+}
+
+func (c *storeConstraint) ptr() nodeid { return c.dst }
+func (c *storeConstraint) renumber(mapping []nodeid) {
+ c.dst = mapping[c.dst]
+ c.src = mapping[c.src]
+}
+
+// dst = &src.f or dst = &src[0]
+// A complex constraint attached to dst (the pointer)
+type offsetAddrConstraint struct {
+ offset uint32
+ dst nodeid
+ src nodeid // (ptr)
+}
+
+func (c *offsetAddrConstraint) ptr() nodeid { return c.src }
+func (c *offsetAddrConstraint) renumber(mapping []nodeid) {
+ c.dst = mapping[c.dst]
+ c.src = mapping[c.src]
+}
+
+// dst = src.(typ) where typ is an interface
+// A complex constraint attached to src (the interface).
+// No representation change: pts(dst) and pts(src) contain tagged objects.
+type typeFilterConstraint struct {
+ typ types.Type // an interface type
+ dst nodeid
+ src nodeid // (ptr)
+}
+
+func (c *typeFilterConstraint) ptr() nodeid { return c.src }
+func (c *typeFilterConstraint) renumber(mapping []nodeid) {
+ c.dst = mapping[c.dst]
+ c.src = mapping[c.src]
+}
+
+// dst = src.(typ) where typ is a concrete type
+// A complex constraint attached to src (the interface).
+//
+// If exact, only tagged objects identical to typ are untagged.
+// If !exact, tagged objects assignable to typ are untagged too.
+// The latter is needed for various reflect operators, e.g. Send.
+//
+// This entails a representation change:
+// pts(src) contains tagged objects,
+// pts(dst) contains their payloads.
+type untagConstraint struct {
+ typ types.Type // a concrete type
+ dst nodeid
+ src nodeid // (ptr)
+ exact bool
+}
+
+func (c *untagConstraint) ptr() nodeid { return c.src }
+func (c *untagConstraint) renumber(mapping []nodeid) {
+ c.dst = mapping[c.dst]
+ c.src = mapping[c.src]
+}
+
+// src.method(params...)
+// A complex constraint attached to iface.
+type invokeConstraint struct {
+ method *types.Func // the abstract method
+ iface nodeid // (ptr) the interface
+ params nodeid // the start of the identity/params/results block
+}
+
+func (c *invokeConstraint) ptr() nodeid { return c.iface }
+func (c *invokeConstraint) renumber(mapping []nodeid) {
+ c.iface = mapping[c.iface]
+ c.params = mapping[c.params]
+}
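
Editor's note: for orientation, the constraint kinds above map onto Go operations roughly as follows. This is an illustrative sketch only (simplified; the real generation rules in the package depend on, among other things, whether the operands are pointer-like), not an exhaustive mapping:

    package demo

    type I interface{ m() }

    type T struct{ f *int }

    func (T) m() {}

    // demo notes, per line, roughly which constraint kind the pointer
    // analysis would generate for that operation.
    func demo(x *int, s T, e I) {
        p := &x       // addrConstraint:       pts(p) ⊇ {x}
        q := p        // copyConstraint:       pts(q) ⊇ pts(p)
        y := *q       // loadConstraint:       load through pointer q
        *q = y        // storeConstraint:      store through pointer q
        g := &s.f     // offsetAddrConstraint: address of field f within s
        var a interface{} = e
        i, _ := a.(I) // typeFilterConstraint: assertion to an interface type
        t, _ := a.(T) // untagConstraint:      assertion to a concrete type
        e.m()         // invokeConstraint:     dynamic call through interface e
        _, _, _, _ = y, g, i, t
    }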
diff --git a/llgo/third_party/go.tools/go/pointer/doc.go b/llgo/third_party/go.tools/go/pointer/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..00bf2a4e1e9afa5b2ed337b4b714e0bb1c410c34
--- /dev/null
+++ b/llgo/third_party/go.tools/go/pointer/doc.go
@@ -0,0 +1,610 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+
+Package pointer implements Andersen's analysis, an inclusion-based
+pointer analysis algorithm first described in (Andersen, 1994).
+
+A pointer analysis relates every pointer expression in a whole program
+to the set of memory locations to which it might point. This
+information can be used to construct a call graph of the program that
+precisely represents the destinations of dynamic function and method
+calls. It can also be used to determine, for example, which pairs of
+channel operations operate on the same channel.
+
+The package allows the client to request a set of expressions of
+interest for which the points-to information will be returned once the
+analysis is complete. In addition, the client may request that a
+callgraph is constructed. The example program in example_test.go
+demonstrates both of these features. Clients should not request more
+information than they need since it may increase the cost of the
+analysis significantly.
+
+
+CLASSIFICATION
+
+Our algorithm is INCLUSION-BASED: the points-to sets for x and y will
+be related by pts(y) ⊇ pts(x) if the program contains the statement
+y = x.
+
+It is FLOW-INSENSITIVE: it ignores all control flow constructs and the
+order of statements in a program. It is therefore a "MAY ALIAS"
+analysis: its facts are of the form "P may/may not point to L",
+not "P must point to L".
+
+It is FIELD-SENSITIVE: it builds separate points-to sets for distinct
+fields, such as x and y in struct { x, y *int }.
+
+It is mostly CONTEXT-INSENSITIVE: most functions are analyzed once,
+so values can flow in at one call to the function and return out at
+another. Only some smaller functions are analyzed with consideration
+of their calling context.
+
+It has a CONTEXT-SENSITIVE HEAP: objects are named by both allocation
+site and context, so the objects returned by two distinct calls to f:
+ func f() *T { return new(T) }
+are distinguished up to the limits of the calling context.
+
+It is a WHOLE PROGRAM analysis: it requires SSA-form IR for the
+complete Go program and summaries for native code.
+
+See the (Hind, PASTE'01) survey paper for an explanation of these terms.
+
+
+SOUNDNESS
+
+The analysis is fully sound when invoked on pure Go programs that do not
+use reflection or unsafe.Pointer conversions. In other words, if there
+is any possible execution of the program in which pointer P may point to
+object O, the analysis will report that fact.
+
+
+REFLECTION
+
+By default, the "reflect" library is ignored by the analysis, as if all
+its functions were no-ops, but if the client enables the Reflection flag,
+the analysis will make a reasonable attempt to model the effects of
+calls into this library. However, this comes at a significant
+performance cost, and not all features of that library are yet
+implemented. In addition, some simplifying approximations must be made
+to ensure that the analysis terminates; for example, reflection can be
+used to construct an infinite set of types and values of those types,
+but the analysis arbitrarily bounds the depth of such types.
+
+Most but not all reflection operations are supported.
+In particular, addressable reflect.Values are not yet implemented, so
+operations such as (reflect.Value).Set have no analytic effect.
+
+
+UNSAFE POINTER CONVERSIONS
+
+The pointer analysis makes no attempt to understand aliasing between the
+operand x and result y of an unsafe.Pointer conversion:
+ y = (*T)(unsafe.Pointer(x))
+It is as if the conversion allocated an entirely new object:
+ y = new(T)
+
+
+NATIVE CODE
+
+The analysis cannot model the aliasing effects of functions written in
+languages other than Go, such as runtime intrinsics in C or assembly, or
+code accessed via cgo. The result is as if such functions are no-ops.
+However, various important intrinsics are understood by the analysis,
+along with built-ins such as append.
+
+The analysis currently provides no way for users to specify the aliasing
+effects of native code.
+
+------------------------------------------------------------------------
+
+IMPLEMENTATION
+
+The remaining documentation is intended for package maintainers and
+pointer analysis specialists. Maintainers should have a solid
+understanding of the referenced papers (especially those by H&L and PKH)
+before making significant changes.
+
+The implementation is similar to that described in (Pearce et al,
+PASTE'04). Unlike many algorithms which interleave constraint
+generation and solving, constructing the callgraph as they go, this
+implementation for the most part observes a phase ordering (generation
+before solving), with only simple (copy) constraints being generated
+during solving. (The exception is reflection, which creates various
+constraints during solving as new types flow to reflect.Value
+operations.) This improves the traction of presolver optimisations,
+but imposes certain restrictions, e.g. potential context sensitivity
+is limited since all variants must be created a priori.
+
+
+TERMINOLOGY
+
+A type is said to be "pointer-like" if it is a reference to an object.
+Pointer-like types include pointers and also interfaces, maps, channels,
+functions and slices.
+
+We occasionally use C's x->f notation to distinguish the case where x
+is a struct pointer from x.f where x is a struct value.
+
+Pointer analysis literature (and our comments) often uses the notation
+dst=*src+offset to mean something different than what it means in Go.
+It means: for each node index p in pts(src), the node index p+offset is
+in pts(dst). Similarly *dst+offset=src is used for store constraints
+and dst=src+offset for offset-address constraints.
+
+
+NODES
+
+Nodes are the key datastructure of the analysis, and have a dual role:
+they represent both constraint variables (equivalence classes of
+pointers) and members of points-to sets (things that can be pointed
+at, i.e. "labels").
+
+Nodes are naturally numbered. The numbering enables compact
+representations of sets of nodes such as bitvectors (or BDDs); and the
+ordering enables a very cheap way to group related nodes together. For
+example, passing n parameters consists of generating n parallel
+constraints from caller+i to callee+i for 0<=i<n.
+
+ ChangeInterface is a simple copy because the representation of
+ tagged objects is independent of the interface type (in contrast
+ to the "method tables" approach used by the gc runtime).
+
+ y := Invoke x.m(...) is implemented by allocating contiguous P/R
+ blocks for the callsite and adding a dynamic rule triggered by each
+ tagged object added to pts(x). The rule adds param/results copy
+ edges to/from each discovered concrete method.
+
+ (Q. Why do we model an interface as a pointer to a pair of type and
+ value, rather than as a pair of a pointer to type and a pointer to
+ value?
+ A. Control-flow joins would merge interfaces ({T1}, {V1}) and ({T2},
+ {V2}) to make ({T1,T2}, {V1,V2}), leading to the infeasible and
+ type-unsafe combination (T1,V2). Treating the value and its concrete
+ type as inseparable makes the analysis type-safe.)
+
+reflect.Value
+ A reflect.Value is modelled very similarly to an interface{}, i.e. as
+ a pointer exclusively to tagged objects, but with two generalizations.
+
+ 1) a reflect.Value that represents an lvalue points to an indirect
+ (obj.flags ⊇ {otIndirect}) tagged object, which has a similar
+ layout to a tagged object except that the value is a pointer to
+ the dynamic type. Indirect tagged objects preserve the correct
+ aliasing so that mutations made by (reflect.Value).Set can be
+ observed.
+
+ Indirect objects only arise when an lvalue is derived from an
+ rvalue by indirection, e.g. the following code:
+
+ type S struct { X T }
+ var s S
+ var i interface{} = &s // i points to a *S-tagged object (from MakeInterface)
+ v1 := reflect.ValueOf(i) // v1 points to same *S-tagged object as i
+ v2 := v1.Elem() // v2 points to an indirect S-tagged object, pointing to s
+ v3 := v2.FieldByName("X") // v3 points to an indirect int-tagged object, pointing to s.X
+ v3.Set(y) // pts(s.X) ⊇ pts(y)
+
+ Whether indirect or not, the concrete type of the tagged object
+ corresponds to the user-visible dynamic type, and the existence
+ of a pointer is an implementation detail.
+
+ (NB: indirect tagged objects are not yet implemented)
+
+ 2) The dynamic type tag of a tagged object pointed to by a
+ reflect.Value may be an interface type; it need not be concrete.
+
+ This arises in code such as this:
+ tEface := reflect.TypeOf(new(interface{})).Elem() // interface{}
+ eface := reflect.Zero(tEface)
+ pts(eface) is a singleton containing an interface{}-tagged
+ object. That tagged object's payload is an interface{} value,
+ i.e. the pts of the payload contains only concrete-tagged
+ objects, although in this example it's the zero interface{} value,
+ so its pts is empty.
+
+reflect.Type
+ Just as in the real "reflect" library, we represent a reflect.Type
+ as an interface whose sole implementation is the concrete type,
+ *reflect.rtype. (This choice is forced on us by go/types: clients
+ cannot fabricate types with arbitrary method sets.)
+
+ rtype instances are canonical: there is at most one per dynamic
+ type. (rtypes are in fact large structs but since identity is all
+ that matters, we represent them by a single node.)
+
+ The payload of each *rtype-tagged object is an *rtype pointer that
+ points to exactly one such canonical rtype object. We exploit this
+ by setting the node.typ of the payload to the dynamic type, not
+ '*rtype'. This saves us an indirection in each resolution rule. As
+ an optimisation, *rtype-tagged objects are canonicalized too.
+
+
+Aggregate types:
+
+Aggregate types are treated as if all directly contained
+aggregates are recursively flattened out.
+
+Structs
+ *ssa.Field y = x.f creates a simple edge to y from x's node at f's offset.
+
+ *ssa.FieldAddr y = &x->f requires a dynamic closure rule to create
+ simple edges for each struct discovered in pts(x).
+
+ The nodes of a struct consist of a special 'identity' node (whose
+ type is that of the struct itself), followed by the nodes for all
+ the struct's fields, recursively flattened out. A pointer to the
+ struct is a pointer to its identity node. That node allows us to
+ distinguish a pointer to a struct from a pointer to its first field.
+
+ Field offsets are logical field offsets (plus one for the identity
+ node), so the sizes of the fields can be ignored by the analysis.
+
+ (The identity node is non-traditional but enables the distinction
+ described above, which is valuable for code comprehension tools.
+ Typical pointer analyses for C, whose purpose is compiler
+ optimization, must soundly model unsafe.Pointer (void*) conversions,
+ and this requires fidelity to the actual memory layout using physical
+ field offsets.)
+
+Arrays
+ We model an array by an identity node (whose type is that of the
+ array itself) followed by a node representing all the elements of
+ the array; the analysis does not distinguish elements with different
+ indices. Effectively, an array is treated like struct{elem T}, a
+ load y=x[i] like y=x.elem, and a store x[i]=y like x.elem=y; the
+ index i is ignored.
+
+ A pointer to an array is a pointer to its identity node. (A slice is
+ also a pointer to an array's identity node.) The identity node
+ allows us to distinguish a pointer to an array from a pointer to one
+ of its elements, but it is rather costly because it introduces more
+ offset constraints into the system. Furthermore, sound treatment of
+ unsafe.Pointer would require us to dispense with this node.
+
+ Arrays may be allocated by Alloc, by make([]T), by calls to append,
+ and via reflection.
+
+Tuples (T, ...)
+ Tuples are treated like structs with naturally numbered fields.
+ *ssa.Extract is analogous to *ssa.Field.
+
+ However, tuples have no identity field since by construction, they
+ cannot be address-taken.
+
+
+FUNCTION CALLS
+
+ There are three kinds of function call:
+ (1) static "call"-mode calls of functions.
+ (2) dynamic "call"-mode calls of functions.
+ (3) dynamic "invoke"-mode calls of interface methods.
+ Cases 1 and 2 apply equally to methods and standalone functions.
+
+ Static calls.
+ A static call consists of three steps:
+ - finding the function object of the callee;
+ - creating copy edges from the actual parameter value nodes to the
+ P-block in the function object (this includes the receiver if
+ the callee is a method);
+ - creating copy edges from the R-block in the function object to
+ the value nodes for the result of the call.
+
+ A static function call is little more than two struct value copies
+ between the P/R blocks of caller and callee:
+
+ callee.P = caller.P
+ caller.R = callee.R
+
+ Context sensitivity
+
+ Static calls (alone) may be treated context sensitively,
+ i.e. each callsite may cause a distinct re-analysis of the
+ callee, improving precision. Our current context-sensitivity
+ policy treats all intrinsics and getter/setter methods in this
+ manner since such functions are small and seem like an obvious
+ source of spurious confluences, though this has not yet been
+ evaluated.
+
+ Dynamic function calls
+
+ Dynamic calls work in a similar manner except that the creation of
+ copy edges occurs dynamically, in a similar fashion to a pair of
+ struct copies in which the callee is indirect:
+
+ callee->P = caller.P
+ caller.R = callee->R
+
+ (Recall that the function object's P- and R-blocks are contiguous.)
+
+ Interface method invocation
+
+ For invoke-mode calls, we create a params/results block for the
+ callsite and attach a dynamic closure rule to the interface. For
+ each new tagged object that flows to the interface, we look up
+ the concrete method, find its function object, and connect its P/R
+ blocks to the callsite's P/R blocks, adding copy edges to the graph
+ during solving.
+
+ Recording call targets
+
+ The analysis notifies its clients of each callsite it encounters,
+ passing a CallSite interface. Among other things, the CallSite
+ contains a synthetic constraint variable ("targets") whose
+ points-to solution includes the set of all function objects to
+ which the call may dispatch.
+
+ It is via this mechanism that the callgraph is made available.
+ Clients may also elect to be notified of callgraph edges directly;
+ internally this just iterates all "targets" variables' pts(·)s.
+
+
+PRESOLVER
+
+We implement Hash-Value Numbering (HVN), a pre-solver constraint
+optimization described in Hardekopf & Lin, SAS'07. This is documented
+in more detail in hvn.go. We intend to add its cousins HR and HU in
+future.
+
+
+SOLVER
+
+The solver is currently a naive Andersen-style implementation; it does
+not perform online cycle detection, though we plan to add solver
+optimisations such as Hybrid- and Lazy- Cycle Detection from (Hardekopf
+& Lin, PLDI'07).
+
+It uses difference propagation (Pearce et al, SQC'04) to avoid
+redundant re-triggering of closure rules for values already seen.
+
+Points-to sets are represented using sparse bit vectors (similar to
+those used in LLVM and gcc), which are more space- and time-efficient
+than sets based on Go's built-in map type or dense bit vectors.
+
+Nodes are permuted prior to solving so that object nodes (which may
+appear in points-to sets) are lower numbered than non-object (var)
+nodes. This improves the density of the set over which the PTSs
+range, and thus the efficiency of the representation.
+
+Partly thanks to avoiding map iteration, the execution of the solver is
+100% deterministic, a great help during debugging.
+
+
+FURTHER READING
+
+Andersen, L. O. 1994. Program analysis and specialization for the C
+programming language. Ph.D. dissertation. DIKU, University of
+Copenhagen.
+
+David J. Pearce, Paul H. J. Kelly, and Chris Hankin. 2004. Efficient
+field-sensitive pointer analysis for C. In Proceedings of the 5th ACM
+SIGPLAN-SIGSOFT workshop on Program analysis for software tools and
+engineering (PASTE '04). ACM, New York, NY, USA, 37-42.
+http://doi.acm.org/10.1145/996821.996835
+
+David J. Pearce, Paul H. J. Kelly, and Chris Hankin. 2004. Online
+Cycle Detection and Difference Propagation: Applications to Pointer
+Analysis. Software Quality Control 12, 4 (December 2004), 311-337.
+http://dx.doi.org/10.1023/B:SQJO.0000039791.93071.a2
+
+David Grove and Craig Chambers. 2001. A framework for call graph
+construction algorithms. ACM Trans. Program. Lang. Syst. 23, 6
+(November 2001), 685-746.
+http://doi.acm.org/10.1145/506315.506316
+
+Ben Hardekopf and Calvin Lin. 2007. The ant and the grasshopper: fast
+and accurate pointer analysis for millions of lines of code. In
+Proceedings of the 2007 ACM SIGPLAN conference on Programming language
+design and implementation (PLDI '07). ACM, New York, NY, USA, 290-299.
+http://doi.acm.org/10.1145/1250734.1250767
+
+Ben Hardekopf and Calvin Lin. 2007. Exploiting pointer and location
+equivalence to optimize pointer analysis. In Proceedings of the 14th
+international conference on Static Analysis (SAS'07), Hanne Riis
+Nielson and Gilberto Filé (Eds.). Springer-Verlag, Berlin, Heidelberg,
+265-280.
+
+Atanas Rountev and Satish Chandra. 2000. Off-line variable substitution
+for scaling points-to analysis. In Proceedings of the ACM SIGPLAN 2000
+conference on Programming language design and implementation (PLDI '00).
+ACM, New York, NY, USA, 47-56. DOI=10.1145/349299.349310
+http://doi.acm.org/10.1145/349299.349310
+
+*/
+package pointer
diff --git a/llgo/third_party/go.tools/go/pointer/example_test.go b/llgo/third_party/go.tools/go/pointer/example_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..98a61fd22caf8b4b0919a7376f8c53be37ff8f16
--- /dev/null
+++ b/llgo/third_party/go.tools/go/pointer/example_test.go
@@ -0,0 +1,125 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pointer_test
+
+import (
+ "fmt"
+ "sort"
+
+ "llvm.org/llgo/third_party/go.tools/go/callgraph"
+ "llvm.org/llgo/third_party/go.tools/go/loader"
+ "llvm.org/llgo/third_party/go.tools/go/pointer"
+ "llvm.org/llgo/third_party/go.tools/go/ssa"
+)
+
+// This program demonstrates how to use the pointer analysis to
+// obtain a conservative call-graph of a Go program.
+// It also shows how to compute the points-to set of a variable,
+// in this case, (C).f's m parameter.
+//
+func Example() {
+ const myprog = `
+package main
+
+import "fmt"
+
+type I interface {
+ f(map[string]int)
+}
+
+type C struct{}
+
+func (C) f(m map[string]int) {
+ fmt.Println("C.f()")
+}
+
+func main() {
+ var i I = C{}
+ x := map[string]int{"one":1}
+ i.f(x) // dynamic method call
+}
+`
+ // Construct a loader.
+ conf := loader.Config{SourceImports: true}
+
+ // Parse the input file.
+ file, err := conf.ParseFile("myprog.go", myprog)
+ if err != nil {
+ fmt.Print(err) // parse error
+ return
+ }
+
+ // Create single-file main package and import its dependencies.
+ conf.CreateFromFiles("main", file)
+
+ iprog, err := conf.Load()
+ if err != nil {
+ fmt.Print(err) // type error in some package
+ return
+ }
+
+ // Create SSA-form program representation.
+ prog := ssa.Create(iprog, 0)
+ mainPkg := prog.Package(iprog.Created[0].Pkg)
+
+ // Build SSA code for bodies of all functions in the whole program.
+ prog.BuildAll()
+
+ // Configure the pointer analysis to build a call-graph.
+ config := &pointer.Config{
+ Mains: []*ssa.Package{mainPkg},
+ BuildCallGraph: true,
+ }
+
+ // Query points-to set of (C).f's parameter m, a map.
+ C := mainPkg.Type("C").Type()
+ Cfm := prog.LookupMethod(C, mainPkg.Object, "f").Params[1]
+ config.AddQuery(Cfm)
+
+ // Run the pointer analysis.
+ result, err := pointer.Analyze(config)
+ if err != nil {
+ panic(err) // internal error in pointer analysis
+ }
+
+ // Find edges originating from the main package.
+ // By converting to strings, we de-duplicate nodes
+ // representing the same function due to context sensitivity.
+ var edges []string
+ callgraph.GraphVisitEdges(result.CallGraph, func(edge *callgraph.Edge) error {
+ caller := edge.Caller.Func
+ if caller.Pkg == mainPkg {
+ edges = append(edges, fmt.Sprint(caller, " --> ", edge.Callee.Func))
+ }
+ return nil
+ })
+
+ // Print the edges in sorted order.
+ sort.Strings(edges)
+ for _, edge := range edges {
+ fmt.Println(edge)
+ }
+ fmt.Println()
+
+ // Print the labels of (C).f(m)'s points-to set.
+ fmt.Println("m may point to:")
+ var labels []string
+ for _, l := range result.Queries[Cfm].PointsTo().Labels() {
+ label := fmt.Sprintf(" %s: %s", prog.Fset.Position(l.Pos()), l)
+ labels = append(labels, label)
+ }
+ sort.Strings(labels)
+ for _, label := range labels {
+ fmt.Println(label)
+ }
+
+ // Output:
+ // (main.C).f --> fmt.Println
+ // main.init --> fmt.init
+ // main.main --> (main.C).f
+ //
+ // m may point to:
+ // myprog.go:18:21: makemap
+}
diff --git a/llgo/third_party/go.tools/go/pointer/gen.go b/llgo/third_party/go.tools/go/pointer/gen.go
new file mode 100644
index 0000000000000000000000000000000000000000..1f78111d88cbbc860e85dd8101deb25f58f0a2d3
--- /dev/null
+++ b/llgo/third_party/go.tools/go/pointer/gen.go
@@ -0,0 +1,1290 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pointer
+
+// This file defines the constraint generation phase.
+
+// TODO(adonovan): move the constraint definitions and the store() etc
+// functions which add them (and are also used by the solver) into a
+// new file, constraints.go.
+
+import (
+ "fmt"
+ "go/token"
+
+ "llvm.org/llgo/third_party/go.tools/go/callgraph"
+ "llvm.org/llgo/third_party/go.tools/go/ssa"
+ "llvm.org/llgo/third_party/go.tools/go/types"
+)
+
+var (
+ tEface = types.NewInterface(nil, nil).Complete()
+ tInvalid = types.Typ[types.Invalid]
+ tUnsafePtr = types.Typ[types.UnsafePointer]
+)
+
+// ---------- Node creation ----------
+
+// nextNode returns the index of the next unused node.
+func (a *analysis) nextNode() nodeid {
+ return nodeid(len(a.nodes))
+}
+
+// addNodes creates nodes for all scalar elements in type typ, and
+// returns the id of the first one, or zero if the type was
+// analytically uninteresting.
+//
+// comment explains the origin of the nodes, as a debugging aid.
+//
+func (a *analysis) addNodes(typ types.Type, comment string) nodeid {
+ id := a.nextNode()
+ for _, fi := range a.flatten(typ) {
+ a.addOneNode(fi.typ, comment, fi)
+ }
+ if id == a.nextNode() {
+ return 0 // type contained no pointers
+ }
+ return id
+}
+
+// addOneNode creates a single node with type typ, and returns its id.
+//
+// typ should generally be scalar (except for tagged.T nodes
+// and struct/array identity nodes). Use addNodes for non-scalar types.
+//
+// comment explains the origin of the nodes, as a debugging aid.
+// subelement indicates the subelement, e.g. ".a.b[*].c".
+//
+func (a *analysis) addOneNode(typ types.Type, comment string, subelement *fieldInfo) nodeid {
+ id := a.nextNode()
+ a.nodes = append(a.nodes, &node{typ: typ, subelement: subelement, solve: new(solverState)})
+ if a.log != nil {
+ fmt.Fprintf(a.log, "\tcreate n%d %s for %s%s\n",
+ id, typ, comment, subelement.path())
+ }
+ return id
+}
+
+// setValueNode associates node id with the value v.
+// cgn identifies the context iff v is a local variable.
+//
+func (a *analysis) setValueNode(v ssa.Value, id nodeid, cgn *cgnode) {
+ if cgn != nil {
+ a.localval[v] = id
+ } else {
+ a.globalval[v] = id
+ }
+ if a.log != nil {
+ fmt.Fprintf(a.log, "\tval[%s] = n%d (%T)\n", v.Name(), id, v)
+ }
+
+ // Due to context-sensitivity, we may encounter the same Value
+ // in many contexts. We merge them to a canonical node, since
+ // that's what all clients want.
+
+ // Record the (v, id) relation if the client has queried pts(v).
+ if _, ok := a.config.Queries[v]; ok {
+ t := v.Type()
+ ptr, ok := a.result.Queries[v]
+ if !ok {
+ // First time? Create the canonical query node.
+ ptr = Pointer{a, a.addNodes(t, "query")}
+ a.result.Queries[v] = ptr
+ }
+ a.copy(ptr.n, id, a.sizeof(t))
+ }
+
+ // Record the (*v, id) relation if the client has queried pts(*v).
+ if _, ok := a.config.IndirectQueries[v]; ok {
+ t := v.Type()
+ ptr, ok := a.result.IndirectQueries[v]
+ if !ok {
+ // First time? Create the canonical indirect query node.
+ ptr = Pointer{a, a.addNodes(v.Type(), "query.indirect")}
+ a.result.IndirectQueries[v] = ptr
+ }
+ a.genLoad(cgn, ptr.n, v, 0, a.sizeof(t))
+ }
+}
+
+// endObject marks the end of a sequence of calls to addNodes denoting
+// a single object allocation.
+//
+// obj is the start node of the object, from a prior call to nextNode.
+// Its size, flags and optional data will be updated.
+//
+func (a *analysis) endObject(obj nodeid, cgn *cgnode, data interface{}) *object {
+ // Ensure object is non-empty by padding;
+ // the pad will be the object node.
+ size := uint32(a.nextNode() - obj)
+ if size == 0 {
+ a.addOneNode(tInvalid, "padding", nil)
+ }
+ objNode := a.nodes[obj]
+ o := &object{
+ size: size, // excludes padding
+ cgn: cgn,
+ data: data,
+ }
+ objNode.obj = o
+
+ return o
+}
+
+// makeFunctionObject creates a new function object (contour) for fn
+// and returns the id of its first node. It also enqueues fn for
+// subsequent constraint generation.
+//
+// For a context-sensitive contour, callersite identifies the sole
+// callsite; for shared contours, callersite is nil.
+//
+func (a *analysis) makeFunctionObject(fn *ssa.Function, callersite *callsite) nodeid {
+ if a.log != nil {
+ fmt.Fprintf(a.log, "\t---- makeFunctionObject %s\n", fn)
+ }
+
+ // obj is the function object (identity, params, results).
+ obj := a.nextNode()
+ cgn := a.makeCGNode(fn, obj, callersite)
+ sig := fn.Signature
+ a.addOneNode(sig, "func.cgnode", nil) // (scalar with Signature type)
+ if recv := sig.Recv(); recv != nil {
+ a.addNodes(recv.Type(), "func.recv")
+ }
+ a.addNodes(sig.Params(), "func.params")
+ a.addNodes(sig.Results(), "func.results")
+ a.endObject(obj, cgn, fn).flags |= otFunction
+
+ if a.log != nil {
+ fmt.Fprintf(a.log, "\t----\n")
+ }
+
+ // Queue it up for constraint processing.
+ a.genq = append(a.genq, cgn)
+
+ return obj
+}
+
+// makeTagged creates a tagged object of type typ.
+func (a *analysis) makeTagged(typ types.Type, cgn *cgnode, data interface{}) nodeid {
+ obj := a.addOneNode(typ, "tagged.T", nil) // NB: type may be non-scalar!
+ a.addNodes(typ, "tagged.v")
+ a.endObject(obj, cgn, data).flags |= otTagged
+ return obj
+}
+
+// makeRtype returns the canonical tagged object of type *rtype whose
+// payload points to the sole rtype object for T.
+//
+// TODO(adonovan): move to reflect.go; it's part of the solver really.
+//
+func (a *analysis) makeRtype(T types.Type) nodeid {
+ if v := a.rtypes.At(T); v != nil {
+ return v.(nodeid)
+ }
+
+ // Create the object for the reflect.rtype itself, which is
+ // ordinarily a large struct but here a single node will do.
+ obj := a.nextNode()
+ a.addOneNode(T, "reflect.rtype", nil)
+ a.endObject(obj, nil, T)
+
+ id := a.makeTagged(a.reflectRtypePtr, nil, T)
+ a.nodes[id+1].typ = T // trick (each *rtype tagged object is a singleton)
+ a.addressOf(a.reflectRtypePtr, id+1, obj)
+
+ a.rtypes.Set(T, id)
+ return id
+}
+
+// rtypeTaggedValue returns the type of the *reflect.rtype-tagged object obj.
+func (a *analysis) rtypeTaggedValue(obj nodeid) types.Type {
+ tDyn, t, _ := a.taggedValue(obj)
+ if tDyn != a.reflectRtypePtr {
+ panic(fmt.Sprintf("not a *reflect.rtype-tagged object: obj=n%d tag=%v payload=n%d", obj, tDyn, t))
+ }
+ return a.nodes[t].typ
+}
+
+// valueNode returns the id of the value node for v, creating it (and
+// the association) as needed. It may return zero for uninteresting
+// values containing no pointers.
+//
+func (a *analysis) valueNode(v ssa.Value) nodeid {
+ // Value nodes for locals are created en masse by genFunc.
+ if id, ok := a.localval[v]; ok {
+ return id
+ }
+
+ // Value nodes for globals are created on demand.
+ id, ok := a.globalval[v]
+ if !ok {
+ var comment string
+ if a.log != nil {
+ comment = v.String()
+ }
+ id = a.addNodes(v.Type(), comment)
+ if obj := a.objectNode(nil, v); obj != 0 {
+ a.addressOf(v.Type(), id, obj)
+ }
+ a.setValueNode(v, id, nil)
+ }
+ return id
+}
+
+// valueOffsetNode ascertains the node for tuple/struct value v,
+// then returns the node for its subfield #index.
+//
+func (a *analysis) valueOffsetNode(v ssa.Value, index int) nodeid {
+ id := a.valueNode(v)
+ if id == 0 {
+ panic(fmt.Sprintf("cannot offset within n0: %s = %s", v.Name(), v))
+ }
+ return id + nodeid(a.offsetOf(v.Type(), index))
+}
+
+// isTaggedObject reports whether object obj is a tagged object.
+func (a *analysis) isTaggedObject(obj nodeid) bool {
+ return a.nodes[obj].obj.flags&otTagged != 0
+}
+
+// taggedValue returns the dynamic type tag, the (first node of the)
+// payload, and the indirect flag of the tagged object starting at id.
+// Panic ensues if !isTaggedObject(id).
+//
+func (a *analysis) taggedValue(obj nodeid) (tDyn types.Type, v nodeid, indirect bool) {
+ n := a.nodes[obj]
+ flags := n.obj.flags
+ if flags&otTagged == 0 {
+ panic(fmt.Sprintf("not a tagged object: n%d", obj))
+ }
+ return n.typ, obj + 1, flags&otIndirect != 0
+}
+
+// funcParams returns the first node of the params (P) block of the
+// function whose object node (obj.flags&otFunction) is id.
+//
+func (a *analysis) funcParams(id nodeid) nodeid {
+ n := a.nodes[id]
+ if n.obj == nil || n.obj.flags&otFunction == 0 {
+ panic(fmt.Sprintf("funcParams(n%d): not a function object block", id))
+ }
+ return id + 1
+}
+
+// funcResults returns the first node of the results (R) block of the
+// function whose object node (obj.flags&otFunction) is id.
+//
+func (a *analysis) funcResults(id nodeid) nodeid {
+ n := a.nodes[id]
+ if n.obj == nil || n.obj.flags&otFunction == 0 {
+ panic(fmt.Sprintf("funcResults(n%d): not a function object block", id))
+ }
+ sig := n.typ.(*types.Signature)
+ id += 1 + nodeid(a.sizeof(sig.Params()))
+ if sig.Recv() != nil {
+ id += nodeid(a.sizeof(sig.Recv().Type()))
+ }
+ return id
+}
+
+// ---------- Constraint creation ----------
+
+// copy creates a constraint of the form dst = src.
+// sizeof is the width (in logical fields) of the copied type.
+//
+func (a *analysis) copy(dst, src nodeid, sizeof uint32) {
+ if src == dst || sizeof == 0 {
+ return // trivial
+ }
+ if src == 0 || dst == 0 {
+ panic(fmt.Sprintf("ill-typed copy dst=n%d src=n%d", dst, src))
+ }
+ for i := uint32(0); i < sizeof; i++ {
+ a.addConstraint(&copyConstraint{dst, src})
+ src++
+ dst++
+ }
+}
+
+// addressOf creates a constraint of the form id = &obj.
+// T is the type of the address.
+func (a *analysis) addressOf(T types.Type, id, obj nodeid) {
+ if id == 0 {
+ panic("addressOf: zero id")
+ }
+ if obj == 0 {
+ panic("addressOf: zero obj")
+ }
+ if a.shouldTrack(T) {
+ a.addConstraint(&addrConstraint{id, obj})
+ }
+}
+
+// load creates a load constraint of the form dst = src[offset].
+// offset is the pointer offset in logical fields.
+// sizeof is the width (in logical fields) of the loaded type.
+//
+func (a *analysis) load(dst, src nodeid, offset, sizeof uint32) {
+ if dst == 0 {
+ return // load of non-pointerlike value
+ }
+ if src == 0 && dst == 0 {
+ return // non-pointerlike operation
+ }
+ if src == 0 || dst == 0 {
+ panic(fmt.Sprintf("ill-typed load dst=n%d src=n%d", dst, src))
+ }
+ for i := uint32(0); i < sizeof; i++ {
+ a.addConstraint(&loadConstraint{offset, dst, src})
+ offset++
+ dst++
+ }
+}
+
+// store creates a store constraint of the form dst[offset] = src.
+// offset is the pointer offset in logical fields.
+// sizeof is the width (in logical fields) of the stored type.
+//
+func (a *analysis) store(dst, src nodeid, offset uint32, sizeof uint32) {
+ if src == 0 {
+ return // store of non-pointerlike value
+ }
+ if src == 0 && dst == 0 {
+ return // non-pointerlike operation
+ }
+ if src == 0 || dst == 0 {
+ panic(fmt.Sprintf("ill-typed store dst=n%d src=n%d", dst, src))
+ }
+ for i := uint32(0); i < sizeof; i++ {
+ a.addConstraint(&storeConstraint{offset, dst, src})
+ offset++
+ src++
+ }
+}
+
+// offsetAddr creates an offsetAddr constraint of the form dst = &src.#offset.
+// offset is the field offset in logical fields.
+// T is the type of the address.
+//
+func (a *analysis) offsetAddr(T types.Type, dst, src nodeid, offset uint32) {
+ if !a.shouldTrack(T) {
+ return
+ }
+ if offset == 0 {
+ // Simplify dst = &src->f0
+ // to dst = src
+ // (NB: this optimisation is defeated by the identity
+ // field prepended to struct and array objects.)
+ a.copy(dst, src, 1)
+ } else {
+ a.addConstraint(&offsetAddrConstraint{offset, dst, src})
+ }
+}
+
+// typeAssert creates a typeFilter or untag constraint of the form dst = src.(T):
+// typeFilter for an interface, untag for a concrete type.
+// The exact flag is specified as for untagConstraint.
+//
+func (a *analysis) typeAssert(T types.Type, dst, src nodeid, exact bool) {
+ if isInterface(T) {
+ a.addConstraint(&typeFilterConstraint{T, dst, src})
+ } else {
+ a.addConstraint(&untagConstraint{T, dst, src, exact})
+ }
+}
+
+// addConstraint adds c to the constraint set.
+func (a *analysis) addConstraint(c constraint) {
+ a.constraints = append(a.constraints, c)
+ if a.log != nil {
+ fmt.Fprintf(a.log, "\t%s\n", c)
+ }
+}
+
+// copyElems generates load/store constraints for *dst = *src,
+// where src and dst are slices or *arrays.
+//
+func (a *analysis) copyElems(cgn *cgnode, typ types.Type, dst, src ssa.Value) {
+ tmp := a.addNodes(typ, "copy")
+ sz := a.sizeof(typ)
+ a.genLoad(cgn, tmp, src, 1, sz)
+ a.genStore(cgn, dst, tmp, 1, sz)
+}
+
+// ---------- Constraint generation ----------
+
+// genConv generates constraints for the conversion operation conv.
+func (a *analysis) genConv(conv *ssa.Convert, cgn *cgnode) {
+ res := a.valueNode(conv)
+ if res == 0 {
+ return // result is non-pointerlike
+ }
+
+ tSrc := conv.X.Type()
+ tDst := conv.Type()
+
+ switch utSrc := tSrc.Underlying().(type) {
+ case *types.Slice:
+ // []byte/[]rune -> string?
+ return
+
+ case *types.Pointer:
+ // *T -> unsafe.Pointer?
+ if tDst.Underlying() == tUnsafePtr {
+ return // we don't model unsafe aliasing (unsound)
+ }
+
+ case *types.Basic:
+ switch tDst.Underlying().(type) {
+ case *types.Pointer:
+ // Treat unsafe.Pointer->*T conversions like
+ // new(T) and create an unaliased object.
+ if utSrc == tUnsafePtr {
+ obj := a.addNodes(mustDeref(tDst), "unsafe.Pointer conversion")
+ a.endObject(obj, cgn, conv)
+ a.addressOf(tDst, res, obj)
+ return
+ }
+
+ case *types.Slice:
+ // string -> []byte/[]rune (or named aliases)?
+ if utSrc.Info()&types.IsString != 0 {
+ obj := a.addNodes(sliceToArray(tDst), "convert")
+ a.endObject(obj, cgn, conv)
+ a.addressOf(tDst, res, obj)
+ return
+ }
+
+ case *types.Basic:
+ // All basic-to-basic type conversions are no-ops.
+ // This includes uintptr<->unsafe.Pointer conversions,
+ // which we (unsoundly) ignore.
+ return
+ }
+ }
+
+ panic(fmt.Sprintf("illegal *ssa.Convert %s -> %s: %s", tSrc, tDst, conv.Parent()))
+}
+
+// genAppend generates constraints for a call to append.
+func (a *analysis) genAppend(instr *ssa.Call, cgn *cgnode) {
+ // Consider z = append(x, y). y is optional.
+ // This may allocate a new [1]T array; call its object w.
+ // We get the following constraints:
+ // z = x
+ // z = &w
+ // *z = *y
+
+ x := instr.Call.Args[0]
+
+ z := instr
+ a.copy(a.valueNode(z), a.valueNode(x), 1) // z = x
+
+ if len(instr.Call.Args) == 1 {
+ return // no allocation for z = append(x) or _ = append(x).
+ }
+
+ // TODO(adonovan): test append([]byte, ...string) []byte.
+
+ y := instr.Call.Args[1]
+ tArray := sliceToArray(instr.Call.Args[0].Type())
+
+ var w nodeid
+ w = a.nextNode()
+ a.addNodes(tArray, "append")
+ a.endObject(w, cgn, instr)
+
+ a.copyElems(cgn, tArray.Elem(), z, y) // *z = *y
+ a.addressOf(instr.Type(), a.valueNode(z), w) // z = &w
+}
+
+// genBuiltinCall generates constraints for a call to a built-in.
+func (a *analysis) genBuiltinCall(instr ssa.CallInstruction, cgn *cgnode) {
+ call := instr.Common()
+ switch call.Value.(*ssa.Builtin).Name() {
+ case "append":
+ // Safe cast: append cannot appear in a go or defer statement.
+ a.genAppend(instr.(*ssa.Call), cgn)
+
+ case "copy":
+ tElem := call.Args[0].Type().Underlying().(*types.Slice).Elem()
+ a.copyElems(cgn, tElem, call.Args[0], call.Args[1])
+
+ case "panic":
+ a.copy(a.panicNode, a.valueNode(call.Args[0]), 1)
+
+ case "recover":
+ if v := instr.Value(); v != nil {
+ a.copy(a.valueNode(v), a.panicNode, 1)
+ }
+
+ case "print":
+ // In the tests, the probe might be the sole reference
+ // to its arg, so make sure we create nodes for it.
+ a.valueNode(call.Args[0])
+
+ case "ssa:wrapnilchk":
+ a.copy(a.valueNode(instr.Value()), a.valueNode(call.Args[0]), 1)
+
+ default:
+ // No-ops: close len cap real imag complex print println delete.
+ }
+}
+
+// shouldUseContext defines the context-sensitivity policy. It
+// returns true if we should analyse all static calls to fn anew.
+//
+// Obviously this interface rather limits how much freedom we have to
+// choose a policy. The current policy, rather arbitrarily, is true
+// for intrinsics and accessor methods (actually: short, single-block,
+// call-free functions). This is just a starting point.
+//
+func (a *analysis) shouldUseContext(fn *ssa.Function) bool {
+ if a.findIntrinsic(fn) != nil {
+ return true // treat intrinsics context-sensitively
+ }
+ if len(fn.Blocks) != 1 {
+ return false // too expensive
+ }
+ blk := fn.Blocks[0]
+ if len(blk.Instrs) > 10 {
+ return false // too expensive
+ }
+ if fn.Synthetic != "" && (fn.Pkg == nil || fn != fn.Pkg.Func("init")) {
+ return true // treat synthetic wrappers context-sensitively
+ }
+ for _, instr := range blk.Instrs {
+ switch instr := instr.(type) {
+ case ssa.CallInstruction:
+ // Disallow function calls (except to built-ins)
+ // because of the danger of unbounded recursion.
+ if _, ok := instr.Common().Value.(*ssa.Builtin); !ok {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// genStaticCall generates constraints for a statically dispatched function call.
+func (a *analysis) genStaticCall(caller *cgnode, site *callsite, call *ssa.CallCommon, result nodeid) {
+ fn := call.StaticCallee()
+
+ // Special cases for inlined intrinsics.
+ switch fn {
+ case a.runtimeSetFinalizer:
+ // Inline SetFinalizer so the call appears direct.
+ site.targets = a.addOneNode(tInvalid, "SetFinalizer.targets", nil)
+ a.addConstraint(&runtimeSetFinalizerConstraint{
+ targets: site.targets,
+ x: a.valueNode(call.Args[0]),
+ f: a.valueNode(call.Args[1]),
+ })
+ return
+
+ case a.reflectValueCall:
+ // Inline (reflect.Value).Call so the call appears direct.
+ dotdotdot := false
+ ret := reflectCallImpl(a, caller, site, a.valueNode(call.Args[0]), a.valueNode(call.Args[1]), dotdotdot)
+ if result != 0 {
+ a.addressOf(fn.Signature.Results().At(0).Type(), result, ret)
+ }
+ return
+ }
+
+ // Ascertain the context (contour/cgnode) for a particular call.
+ var obj nodeid
+ if a.shouldUseContext(fn) {
+ obj = a.makeFunctionObject(fn, site) // new contour
+ } else {
+ obj = a.objectNode(nil, fn) // shared contour
+ }
+ a.callEdge(caller, site, obj)
+
+ sig := call.Signature()
+
+ // Copy receiver, if any.
+ params := a.funcParams(obj)
+ args := call.Args
+ if sig.Recv() != nil {
+ sz := a.sizeof(sig.Recv().Type())
+ a.copy(params, a.valueNode(args[0]), sz)
+ params += nodeid(sz)
+ args = args[1:]
+ }
+
+ // Copy actual parameters into formal params block.
+ // Must loop, since the actuals aren't contiguous.
+ for i, arg := range args {
+ sz := a.sizeof(sig.Params().At(i).Type())
+ a.copy(params, a.valueNode(arg), sz)
+ params += nodeid(sz)
+ }
+
+ // Copy formal results block to actual result.
+ if result != 0 {
+ a.copy(result, a.funcResults(obj), a.sizeof(sig.Results()))
+ }
+}
+
+// genDynamicCall generates constraints for a dynamic function call.
+func (a *analysis) genDynamicCall(caller *cgnode, site *callsite, call *ssa.CallCommon, result nodeid) {
+ // pts(targets) will be the set of possible call targets.
+ site.targets = a.valueNode(call.Value)
+
+ // We add dynamic closure rules that store the arguments into
+ // the P-block and load the results from the R-block of each
+ // function discovered in pts(targets).
+
+ sig := call.Signature()
+ var offset uint32 = 1 // P/R block starts at offset 1
+ for i, arg := range call.Args {
+ sz := a.sizeof(sig.Params().At(i).Type())
+ a.genStore(caller, call.Value, a.valueNode(arg), offset, sz)
+ offset += sz
+ }
+ if result != 0 {
+ a.genLoad(caller, result, call.Value, offset, a.sizeof(sig.Results()))
+ }
+}
+
+// genInvoke generates constraints for a dynamic method invocation.
+func (a *analysis) genInvoke(caller *cgnode, site *callsite, call *ssa.CallCommon, result nodeid) {
+ if call.Value.Type() == a.reflectType {
+ a.genInvokeReflectType(caller, site, call, result)
+ return
+ }
+
+ sig := call.Signature()
+
+ // Allocate a contiguous targets/params/results block for this call.
+ block := a.nextNode()
+ // pts(targets) will be the set of possible call targets
+ site.targets = a.addOneNode(sig, "invoke.targets", nil)
+ p := a.addNodes(sig.Params(), "invoke.params")
+ r := a.addNodes(sig.Results(), "invoke.results")
+
+ // Copy the actual parameters into the call's params block.
+ for i, n := 0, sig.Params().Len(); i < n; i++ {
+ sz := a.sizeof(sig.Params().At(i).Type())
+ a.copy(p, a.valueNode(call.Args[i]), sz)
+ p += nodeid(sz)
+ }
+ // Copy the call's results block to the actual results.
+ if result != 0 {
+ a.copy(result, r, a.sizeof(sig.Results()))
+ }
+
+ // We add a dynamic invoke constraint that will connect the
+ // caller's and the callee's P/R blocks for each discovered
+ // call target.
+ a.addConstraint(&invokeConstraint{call.Method, a.valueNode(call.Value), block})
+}
+
+// genInvokeReflectType is a specialization of genInvoke where the
+// receiver type is a reflect.Type, under the assumption that there
+// can be at most one implementation of this interface, *reflect.rtype.
+//
+// (Though this may appear to be an instance of a pattern---method
+// calls on interfaces known to have exactly one implementation---in
+// practice it occurs rarely, so we special case for reflect.Type.)
+//
+// In effect we treat this:
+// var rt reflect.Type = ...
+// rt.F()
+// as this:
+// rt.(*reflect.rtype).F()
+//
+func (a *analysis) genInvokeReflectType(caller *cgnode, site *callsite, call *ssa.CallCommon, result nodeid) {
+ // Unpack receiver into rtype
+ rtype := a.addOneNode(a.reflectRtypePtr, "rtype.recv", nil)
+ recv := a.valueNode(call.Value)
+ a.typeAssert(a.reflectRtypePtr, rtype, recv, true)
+
+ // Look up the concrete method.
+ fn := a.prog.LookupMethod(a.reflectRtypePtr, call.Method.Pkg(), call.Method.Name())
+
+ obj := a.makeFunctionObject(fn, site) // new contour for this call
+ a.callEdge(caller, site, obj)
+
+ // From now on, it's essentially a static call, but little is
+ // gained by factoring together the code for both cases.
+
+ sig := fn.Signature // concrete method
+ targets := a.addOneNode(sig, "call.targets", nil)
+ a.addressOf(sig, targets, obj) // (a singleton)
+
+ // Copy receiver.
+ params := a.funcParams(obj)
+ a.copy(params, rtype, 1)
+ params++
+
+ // Copy actual parameters into formal P-block.
+ // Must loop, since the actuals aren't contiguous.
+ for i, arg := range call.Args {
+ sz := a.sizeof(sig.Params().At(i).Type())
+ a.copy(params, a.valueNode(arg), sz)
+ params += nodeid(sz)
+ }
+
+ // Copy formal R-block to actual R-block.
+ if result != 0 {
+ a.copy(result, a.funcResults(obj), a.sizeof(sig.Results()))
+ }
+}
+
+// genCall generates constraints for call instruction instr.
+func (a *analysis) genCall(caller *cgnode, instr ssa.CallInstruction) {
+ call := instr.Common()
+
+ // Intrinsic implementations of built-in functions.
+ if _, ok := call.Value.(*ssa.Builtin); ok {
+ a.genBuiltinCall(instr, caller)
+ return
+ }
+
+ var result nodeid
+ if v := instr.Value(); v != nil {
+ result = a.valueNode(v)
+ }
+
+ site := &callsite{instr: instr}
+ if call.StaticCallee() != nil {
+ a.genStaticCall(caller, site, call, result)
+ } else if call.IsInvoke() {
+ a.genInvoke(caller, site, call, result)
+ } else {
+ a.genDynamicCall(caller, site, call, result)
+ }
+
+ caller.sites = append(caller.sites, site)
+
+ if a.log != nil {
+ // TODO(adonovan): debug: improve log message.
+ fmt.Fprintf(a.log, "\t%s to targets %s from %s\n", site, site.targets, caller)
+ }
+}
+
+// objectNode returns the object to which v points, if known.
+// In other words, if the points-to set of v is a singleton, it
+// returns the sole label, zero otherwise.
+//
+// We exploit this information to make the generated constraints less
+// dynamic. For example, a complex load constraint can be replaced by
+// a simple copy constraint when the sole destination is known a priori.
+//
+// Some SSA instructions always have singleton points-to sets:
+// Alloc, Function, Global, MakeChan, MakeClosure, MakeInterface, MakeMap, MakeSlice.
+// Others may be singletons depending on their operands:
+// FreeVar, Const, Convert, FieldAddr, IndexAddr, Slice.
+//
+// Idempotent. Objects are created as needed, possibly via recursion
+// down the SSA value graph, e.g. IndexAddr(FieldAddr(Alloc)).
+//
+func (a *analysis) objectNode(cgn *cgnode, v ssa.Value) nodeid {
+ switch v.(type) {
+ case *ssa.Global, *ssa.Function, *ssa.Const, *ssa.FreeVar:
+ // Global object.
+ obj, ok := a.globalobj[v]
+ if !ok {
+ switch v := v.(type) {
+ case *ssa.Global:
+ obj = a.nextNode()
+ a.addNodes(mustDeref(v.Type()), "global")
+ a.endObject(obj, nil, v)
+
+ case *ssa.Function:
+ obj = a.makeFunctionObject(v, nil)
+
+ case *ssa.Const:
+ // not addressable
+
+ case *ssa.FreeVar:
+ // not addressable
+ }
+
+ if a.log != nil {
+ fmt.Fprintf(a.log, "\tglobalobj[%s] = n%d\n", v, obj)
+ }
+ a.globalobj[v] = obj
+ }
+ return obj
+ }
+
+ // Local object.
+ obj, ok := a.localobj[v]
+ if !ok {
+ switch v := v.(type) {
+ case *ssa.Alloc:
+ obj = a.nextNode()
+ a.addNodes(mustDeref(v.Type()), "alloc")
+ a.endObject(obj, cgn, v)
+
+ case *ssa.MakeSlice:
+ obj = a.nextNode()
+ a.addNodes(sliceToArray(v.Type()), "makeslice")
+ a.endObject(obj, cgn, v)
+
+ case *ssa.MakeChan:
+ obj = a.nextNode()
+ a.addNodes(v.Type().Underlying().(*types.Chan).Elem(), "makechan")
+ a.endObject(obj, cgn, v)
+
+ case *ssa.MakeMap:
+ obj = a.nextNode()
+ tmap := v.Type().Underlying().(*types.Map)
+ a.addNodes(tmap.Key(), "makemap.key")
+ elem := a.addNodes(tmap.Elem(), "makemap.value")
+
+ // To update the value field, MapUpdate
+ // generates store-with-offset constraints which
+ // the presolver can't model, so we must mark
+ // those nodes indirect.
+ for id, end := elem, elem+nodeid(a.sizeof(tmap.Elem())); id < end; id++ {
+ a.mapValues = append(a.mapValues, id)
+ }
+ a.endObject(obj, cgn, v)
+
+ case *ssa.MakeInterface:
+ tConc := v.X.Type()
+ obj = a.makeTagged(tConc, cgn, v)
+
+ // Copy the value into it, if nontrivial.
+ if x := a.valueNode(v.X); x != 0 {
+ a.copy(obj+1, x, a.sizeof(tConc))
+ }
+
+ case *ssa.FieldAddr:
+ if xobj := a.objectNode(cgn, v.X); xobj != 0 {
+ obj = xobj + nodeid(a.offsetOf(mustDeref(v.X.Type()), v.Field))
+ }
+
+ case *ssa.IndexAddr:
+ if xobj := a.objectNode(cgn, v.X); xobj != 0 {
+ obj = xobj + 1
+ }
+
+ case *ssa.Slice:
+ obj = a.objectNode(cgn, v.X)
+
+ case *ssa.Convert:
+ // TODO(adonovan): opt: handle these cases too:
+ // - unsafe.Pointer->*T conversion acts like Alloc
+ // - string->[]byte/[]rune conversion acts like MakeSlice
+ }
+
+ if a.log != nil {
+ fmt.Fprintf(a.log, "\tlocalobj[%s] = n%d\n", v.Name(), obj)
+ }
+ a.localobj[v] = obj
+ }
+ return obj
+}
+
+// genLoad generates constraints for result = *(ptr + offset).
+func (a *analysis) genLoad(cgn *cgnode, result nodeid, ptr ssa.Value, offset, sizeof uint32) {
+ if obj := a.objectNode(cgn, ptr); obj != 0 {
+ // Pre-apply loadConstraint.solve().
+ a.copy(result, obj+nodeid(offset), sizeof)
+ } else {
+ a.load(result, a.valueNode(ptr), offset, sizeof)
+ }
+}
+
+// genOffsetAddr generates constraints for a 'v=ptr.field' (FieldAddr)
+// or 'v=ptr[*]' (IndexAddr) instruction v.
+func (a *analysis) genOffsetAddr(cgn *cgnode, v ssa.Value, ptr nodeid, offset uint32) {
+ dst := a.valueNode(v)
+ if obj := a.objectNode(cgn, v); obj != 0 {
+ // Pre-apply offsetAddrConstraint.solve().
+ a.addressOf(v.Type(), dst, obj)
+ } else {
+ a.offsetAddr(v.Type(), dst, ptr, offset)
+ }
+}
+
+// genStore generates constraints for *(ptr + offset) = val.
+func (a *analysis) genStore(cgn *cgnode, ptr ssa.Value, val nodeid, offset, sizeof uint32) {
+ if obj := a.objectNode(cgn, ptr); obj != 0 {
+ // Pre-apply storeConstraint.solve().
+ a.copy(obj+nodeid(offset), val, sizeof)
+ } else {
+ a.store(a.valueNode(ptr), val, offset, sizeof)
+ }
+}
+
+// genInstr generates constraints for instruction instr in context cgn.
+func (a *analysis) genInstr(cgn *cgnode, instr ssa.Instruction) {
+ if a.log != nil {
+ var prefix string
+ if val, ok := instr.(ssa.Value); ok {
+ prefix = val.Name() + " = "
+ }
+ fmt.Fprintf(a.log, "; %s%s\n", prefix, instr)
+ }
+
+ switch instr := instr.(type) {
+ case *ssa.DebugRef:
+ // no-op.
+
+ case *ssa.UnOp:
+ switch instr.Op {
+ case token.ARROW: // <-x
+ // We can ignore instr.CommaOk because the node we're
+ // altering is always at zero offset relative to instr
+ tElem := instr.X.Type().Underlying().(*types.Chan).Elem()
+ a.genLoad(cgn, a.valueNode(instr), instr.X, 0, a.sizeof(tElem))
+
+ case token.MUL: // *x
+ a.genLoad(cgn, a.valueNode(instr), instr.X, 0, a.sizeof(instr.Type()))
+
+ default:
+ // NOT, SUB, XOR: no-op.
+ }
+
+ case *ssa.BinOp:
+ // All no-ops.
+
+ case ssa.CallInstruction: // *ssa.Call, *ssa.Go, *ssa.Defer
+ a.genCall(cgn, instr)
+
+ case *ssa.ChangeType:
+ a.copy(a.valueNode(instr), a.valueNode(instr.X), 1)
+
+ case *ssa.Convert:
+ a.genConv(instr, cgn)
+
+ case *ssa.Extract:
+ a.copy(a.valueNode(instr),
+ a.valueOffsetNode(instr.Tuple, instr.Index),
+ a.sizeof(instr.Type()))
+
+ case *ssa.FieldAddr:
+ a.genOffsetAddr(cgn, instr, a.valueNode(instr.X),
+ a.offsetOf(mustDeref(instr.X.Type()), instr.Field))
+
+ case *ssa.IndexAddr:
+ a.genOffsetAddr(cgn, instr, a.valueNode(instr.X), 1)
+
+ case *ssa.Field:
+ a.copy(a.valueNode(instr),
+ a.valueOffsetNode(instr.X, instr.Field),
+ a.sizeof(instr.Type()))
+
+ case *ssa.Index:
+ a.copy(a.valueNode(instr), 1+a.valueNode(instr.X), a.sizeof(instr.Type()))
+
+ case *ssa.Select:
+ recv := a.valueOffsetNode(instr, 2) // instr : (index, recvOk, recv0, ... recv_n-1)
+ for _, st := range instr.States {
+ elemSize := a.sizeof(st.Chan.Type().Underlying().(*types.Chan).Elem())
+ switch st.Dir {
+ case types.RecvOnly:
+ a.genLoad(cgn, recv, st.Chan, 0, elemSize)
+ recv += nodeid(elemSize)
+
+ case types.SendOnly:
+ a.genStore(cgn, st.Chan, a.valueNode(st.Send), 0, elemSize)
+ }
+ }
+
+ case *ssa.Return:
+ results := a.funcResults(cgn.obj)
+ for _, r := range instr.Results {
+ sz := a.sizeof(r.Type())
+ a.copy(results, a.valueNode(r), sz)
+ results += nodeid(sz)
+ }
+
+ case *ssa.Send:
+ a.genStore(cgn, instr.Chan, a.valueNode(instr.X), 0, a.sizeof(instr.X.Type()))
+
+ case *ssa.Store:
+ a.genStore(cgn, instr.Addr, a.valueNode(instr.Val), 0, a.sizeof(instr.Val.Type()))
+
+ case *ssa.Alloc, *ssa.MakeSlice, *ssa.MakeChan, *ssa.MakeMap, *ssa.MakeInterface:
+ v := instr.(ssa.Value)
+ a.addressOf(v.Type(), a.valueNode(v), a.objectNode(cgn, v))
+
+ case *ssa.ChangeInterface:
+ a.copy(a.valueNode(instr), a.valueNode(instr.X), 1)
+
+ case *ssa.TypeAssert:
+ a.typeAssert(instr.AssertedType, a.valueNode(instr), a.valueNode(instr.X), true)
+
+ case *ssa.Slice:
+ a.copy(a.valueNode(instr), a.valueNode(instr.X), 1)
+
+ case *ssa.If, *ssa.Jump:
+ // no-op.
+
+ case *ssa.Phi:
+ sz := a.sizeof(instr.Type())
+ for _, e := range instr.Edges {
+ a.copy(a.valueNode(instr), a.valueNode(e), sz)
+ }
+
+ case *ssa.MakeClosure:
+ fn := instr.Fn.(*ssa.Function)
+ a.copy(a.valueNode(instr), a.valueNode(fn), 1)
+ // Free variables are treated like global variables.
+ for i, b := range instr.Bindings {
+ a.copy(a.valueNode(fn.FreeVars[i]), a.valueNode(b), a.sizeof(b.Type()))
+ }
+
+ case *ssa.RunDefers:
+ // The analysis is flow insensitive, so we just "call"
+ // defers as we encounter them.
+
+ case *ssa.Range:
+ // Do nothing. Next{Iter: *ssa.Range} handles this case.
+
+ case *ssa.Next:
+ if !instr.IsString { // map
+ // Assumes that Next is always directly applied to a Range result.
+ theMap := instr.Iter.(*ssa.Range).X
+ tMap := theMap.Type().Underlying().(*types.Map)
+ ksize := a.sizeof(tMap.Key())
+ vsize := a.sizeof(tMap.Elem())
+
+ // Load from the map's (k,v) into the tuple's (ok, k, v).
+ a.genLoad(cgn, a.valueNode(instr)+1, theMap, 0, ksize+vsize)
+ }
+
+ case *ssa.Lookup:
+ if tMap, ok := instr.X.Type().Underlying().(*types.Map); ok {
+ // CommaOk can be ignored: field 0 is a no-op.
+ ksize := a.sizeof(tMap.Key())
+ vsize := a.sizeof(tMap.Elem())
+ a.genLoad(cgn, a.valueNode(instr), instr.X, ksize, vsize)
+ }
+
+ case *ssa.MapUpdate:
+ tmap := instr.Map.Type().Underlying().(*types.Map)
+ ksize := a.sizeof(tmap.Key())
+ vsize := a.sizeof(tmap.Elem())
+ a.genStore(cgn, instr.Map, a.valueNode(instr.Key), 0, ksize)
+ a.genStore(cgn, instr.Map, a.valueNode(instr.Value), ksize, vsize)
+
+ case *ssa.Panic:
+ a.copy(a.panicNode, a.valueNode(instr.X), 1)
+
+ default:
+ panic(fmt.Sprintf("unimplemented: %T", instr))
+ }
+}
+
+func (a *analysis) makeCGNode(fn *ssa.Function, obj nodeid, callersite *callsite) *cgnode {
+ cgn := &cgnode{fn: fn, obj: obj, callersite: callersite}
+ a.cgnodes = append(a.cgnodes, cgn)
+ return cgn
+}
+
+// genRootCalls generates the synthetic root of the callgraph and the
+// initial calls from it to the analysis scope, such as main, a test
+// or a library.
+//
+func (a *analysis) genRootCalls() *cgnode {
+ r := a.prog.NewFunction("<root>", new(types.Signature), "root of callgraph")
+ root := a.makeCGNode(r, 0, nil)
+
+ // TODO(adonovan): make an ssa utility to construct an actual
+ // root function so we don't need to special-case site-less
+ // call edges.
+
+ // For each main package, call main.init(), main.main().
+ for _, mainPkg := range a.config.Mains {
+ main := mainPkg.Func("main")
+ if main == nil {
+ panic(fmt.Sprintf("%s has no main function", mainPkg))
+ }
+
+ targets := a.addOneNode(main.Signature, "root.targets", nil)
+ site := &callsite{targets: targets}
+ root.sites = append(root.sites, site)
+ for _, fn := range [2]*ssa.Function{mainPkg.Func("init"), main} {
+ if a.log != nil {
+ fmt.Fprintf(a.log, "\troot call to %s:\n", fn)
+ }
+ a.copy(targets, a.valueNode(fn), 1)
+ }
+ }
+
+ return root
+}
+
+// genFunc generates constraints for function fn.
+func (a *analysis) genFunc(cgn *cgnode) {
+ fn := cgn.fn
+
+ impl := a.findIntrinsic(fn)
+
+ if a.log != nil {
+ fmt.Fprintf(a.log, "\n\n==== Generating constraints for %s, %s\n", cgn, cgn.contour())
+
+ // Hack: don't display body if intrinsic.
+ if impl != nil {
+ fn2 := *cgn.fn // copy
+ fn2.Locals = nil
+ fn2.Blocks = nil
+ fn2.WriteTo(a.log)
+ } else {
+ cgn.fn.WriteTo(a.log)
+ }
+ }
+
+ if impl != nil {
+ impl(a, cgn)
+ return
+ }
+
+ if fn.Blocks == nil {
+ // External function with no intrinsic treatment.
+ // We'll warn about calls to such functions at the end.
+ return
+ }
+
+ if a.log != nil {
+ fmt.Fprintln(a.log, "; Creating nodes for local values")
+ }
+
+ a.localval = make(map[ssa.Value]nodeid)
+ a.localobj = make(map[ssa.Value]nodeid)
+
+ // The value nodes for the params are in the func object block.
+ params := a.funcParams(cgn.obj)
+ for _, p := range fn.Params {
+ a.setValueNode(p, params, cgn)
+ params += nodeid(a.sizeof(p.Type()))
+ }
+
+ // Free variables have global cardinality:
+ // the outer function sets them with MakeClosure;
+ // the inner function accesses them with FreeVar.
+ //
+ // TODO(adonovan): treat free vars context-sensitively.
+
+ // Create value nodes for all value instructions
+ // since SSA may contain forward references.
+ var space [10]*ssa.Value
+ for _, b := range fn.Blocks {
+ for _, instr := range b.Instrs {
+ switch instr := instr.(type) {
+ case *ssa.Range:
+ // do nothing: it has a funky type,
+ // and *ssa.Next does all the work.
+
+ case ssa.Value:
+ var comment string
+ if a.log != nil {
+ comment = instr.Name()
+ }
+ id := a.addNodes(instr.Type(), comment)
+ a.setValueNode(instr, id, cgn)
+ }
+
+ // Record all address-taken functions (for presolver).
+ rands := instr.Operands(space[:0])
+ if call, ok := instr.(ssa.CallInstruction); ok && !call.Common().IsInvoke() {
+ // Skip CallCommon.Value in "call" mode.
+ // TODO(adonovan): fix: relies on unspecified ordering. Specify it.
+ rands = rands[1:]
+ }
+ for _, rand := range rands {
+ if atf, ok := (*rand).(*ssa.Function); ok {
+ a.atFuncs[atf] = true
+ }
+ }
+ }
+ }
+
+ // Generate constraints for instructions.
+ for _, b := range fn.Blocks {
+ for _, instr := range b.Instrs {
+ a.genInstr(cgn, instr)
+ }
+ }
+
+ a.localval = nil
+ a.localobj = nil
+}
+
+// genMethodsOf generates nodes and constraints for all methods of type T.
+func (a *analysis) genMethodsOf(T types.Type) {
+ itf := isInterface(T)
+
+ // TODO(adonovan): can we skip this entirely if itf is true?
+ // I think so, but the answer may depend on reflection.
+ mset := a.prog.MethodSets.MethodSet(T)
+ for i, n := 0, mset.Len(); i < n; i++ {
+ m := a.prog.Method(mset.At(i))
+ a.valueNode(m)
+
+ if !itf {
+ // Methods of concrete types are address-taken functions.
+ a.atFuncs[m] = true
+ }
+ }
+}
+
+// generate generates offline constraints for the entire program.
+func (a *analysis) generate() {
+ start("Constraint generation")
+ if a.log != nil {
+ fmt.Fprintln(a.log, "==== Generating constraints")
+ }
+
+ // Create a dummy node since we use the nodeid 0 for
+ // non-pointerlike variables.
+ a.addNodes(tInvalid, "(zero)")
+
+ // Create the global node for panic values.
+ a.panicNode = a.addNodes(tEface, "panic")
+
+ // Create nodes and constraints for all methods of reflect.rtype.
+ // (Shared contours are used by dynamic calls to reflect.Type
+ // methods---typically just String().)
+ if rtype := a.reflectRtypePtr; rtype != nil {
+ a.genMethodsOf(rtype)
+ }
+
+ root := a.genRootCalls()
+
+ if a.config.BuildCallGraph {
+ a.result.CallGraph = callgraph.New(root.fn)
+ }
+
+ // Create nodes and constraints for all methods of all types
+ // that are dynamically accessible via reflection or interfaces.
+ for _, T := range a.prog.TypesWithMethodSets() {
+ a.genMethodsOf(T)
+ }
+
+ // Generate constraints for entire program.
+ for len(a.genq) > 0 {
+ cgn := a.genq[0]
+ a.genq = a.genq[1:]
+ a.genFunc(cgn)
+ }
+
+ // The runtime magically allocates os.Args; so should we.
+ if os := a.prog.ImportedPackage("os"); os != nil {
+ // In effect: os.Args = new([1]string)[:]
+ T := types.NewSlice(types.Typ[types.String])
+ obj := a.addNodes(sliceToArray(T), "<command-line args>")
+ a.endObject(obj, nil, "<command-line args>")
+ a.addressOf(T, a.objectNode(nil, os.Var("Args")), obj)
+ }
+
+ // Discard generation state, to avoid confusion after node renumbering.
+ a.panicNode = 0
+ a.globalval = nil
+ a.localval = nil
+ a.localobj = nil
+
+ stop("Constraint generation")
+}
diff --git a/llgo/third_party/go.tools/go/pointer/hvn.go b/llgo/third_party/go.tools/go/pointer/hvn.go
new file mode 100644
index 0000000000000000000000000000000000000000..84422a236067c10226d2a18f20e27e118460934a
--- /dev/null
+++ b/llgo/third_party/go.tools/go/pointer/hvn.go
@@ -0,0 +1,969 @@
+package pointer
+
+// This file implements Hash-Value Numbering (HVN), a pre-solver
+// constraint optimization described in Hardekopf & Lin, SAS'07 (see
+// doc.go) that analyses the graph topology to determine which sets of
+// variables are "pointer equivalent" (PE), i.e. must have identical
+// points-to sets in the solution.
+//
+// A separate ("offline") graph is constructed. Its nodes are those of
+// the main-graph, plus an additional node *X for each pointer node X.
+// With this graph we can reason about the unknown points-to set of
+// dereferenced pointers. (We do not generalize this to represent
+// unknown fields x->f, perhaps because such fields would be numerous,
+// though it might be worth an experiment.)
+//
+// Nodes whose points-to relations are not entirely captured by the
+// graph are marked as "indirect": the *X nodes, the parameters of
+// address-taken functions (which includes all functions in method
+// sets), or nodes updated by the solver rules for reflection, etc.
+//
+// All addr (y=&x) nodes are initially assigned a pointer-equivalence
+// (PE) label equal to x's nodeid in the main graph. (These are the
+// only PE labels that are less than len(a.nodes).)
+//
+// All offsetAddr (y=&x.f) constraints are initially assigned a PE
+// label; such labels are memoized, keyed by (x, f), so that equivalent
+// nodes y are assigned the same label.
+//
+// Then we process each strongly connected component (SCC) of the graph
+// in topological order, assigning it a PE label based on the set P of
+// PE labels that flow to it from its immediate dependencies.
+//
+// If any node in P is "indirect", the entire SCC is assigned a fresh PE
+// label. Otherwise:
+//
+// |P|=0 if P is empty, all nodes in the SCC are non-pointers (e.g.
+// uninitialized variables, or formal params of dead functions)
+// and the SCC is assigned the PE label of zero.
+//
+// |P|=1 if P is a singleton, the SCC is assigned the same label as the
+// sole element of P.
+//
+// |P|>1 if P contains multiple labels, a unique label representing P is
+// invented and recorded in a hash table, so that other
+// equivalent SCCs may also be assigned this label, akin to
+// conventional hash-value numbering in a compiler.
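+//
+// For example (an illustration only, not constraints from a real
+// program): given
+//
+// p = &x
+// q = p
+// r = q
+//
+// the addr constraint gives p the PE label of x; the incoming label
+// sets P for q and for r are each the singleton {label(p)}, so q and r
+// receive p's label and the whole copy chain collapses onto one node.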
+//
+// Finally, a renumbering is computed such that each node is replaced by
+// the lowest-numbered node with the same PE label. All constraints are
+// renumbered, and any resulting duplicates are eliminated.
+//
+// The only nodes that are not renumbered are the objects x in addr
+// (y=&x) constraints, since the ids of these nodes (and fields derived
+// from them via offsetAddr rules) are the elements of all points-to
+// sets, so they must remain as they are if we want the same solution.
+//
+// The solverStates (node.solve) for nodes in the same equivalence class
+// are linked together so that all nodes in the class have the same
+// solution. This avoids the need to renumber nodeids buried in
+// Queries, cgnodes, etc (like (*analysis).renumber() does) since only
+// the solution is needed.
+//
+// The result of HVN is that the number of distinct nodes and
+// constraints is reduced, but the solution is identical (almost---see
+// CROSS-CHECK below). In particular, both linear and cyclic chains of
+// copies are each replaced by a single node.
+//
+// Nodes and constraints created "online" (e.g. while solving reflection
+// constraints) are not subject to this optimization.
+//
+// PERFORMANCE
+//
+// In two benchmarks (oracle and godoc), HVN eliminates about two thirds
+// of nodes, the majority accounted for by non-pointers: nodes of
+// non-pointer type, pointers that remain nil, formal parameters of dead
+// functions, nodes of untracked types, etc. It also reduces the number
+// of constraints, also by about two thirds, and the solving time by
+// 30--42%, although we must pay about 15% for the running time of HVN
+// itself. The benefit is greater for larger applications.
+//
+// There are many possible optimizations to improve the performance:
+// * Use fewer than 1:1 onodes to main graph nodes: many of the onodes
+// we create are not needed.
+// * HU (HVN with Union---see paper): coalesce "union" peLabels when
+// their expanded-out sets are equal.
+// * HR (HVN with deReference---see paper): this will require that we
+// apply HVN until fixed point, which may need more bookkeeping of the
+// correspondence of main nodes to onodes.
+// * Location Equivalence (see paper): have points-to sets contain not
+// locations but location-equivalence class labels, each representing
+// a set of locations.
+// * HVN with field-sensitive ref: model each of the fields of a
+// pointer-to-struct.
+//
+// CROSS-CHECK
+//
+// To verify the soundness of the optimization, when the
+// debugHVNCrossCheck option is enabled, we run the solver twice, once
+// before and once after running HVN, dumping the solution to disk, and
+// then we compare the results. If they are not identical, the analysis
+// panics.
+//
+// The solution dumped to disk includes only the N*N submatrix of the
+// complete solution where N is the number of nodes after generation.
+// In other words, we ignore pointer variables and objects created by
+// the solver itself, since their numbering depends on the solver order,
+// which is affected by the optimization. In any case, that's the only
+// part the client cares about.
+//
+// The cross-check is too strict and may fail spuriously. Although the
+// H&L paper describing HVN states that the solutions obtained should be
+// identical, this is not the case in practice because HVN can collapse
+// cycles involving *p even when pts(p)={}. Consider this example
+// distilled from testdata/hello.go:
+//
+// var x T
+// func f(p **T) {
+// t0 = *p
+// ...
+// t1 = φ(t0, &x)
+// *p = t1
+// }
+//
+// If f is dead code, we get:
+// unoptimized: pts(p)={} pts(t0)={} pts(t1)={&x}
+// optimized: pts(p)={} pts(t0)=pts(t1)=pts(*p)={&x}
+//
+// It's hard to argue that this is a bug: the result is sound and the
+// loss of precision is inconsequential---f is dead code, after all.
+// But unfortunately it limits the usefulness of the cross-check since
+// failures must be carefully analyzed. Ben Hardekopf suggests (in
+// personal correspondence) some approaches to mitigating it:
+//
+// If there is a node with an HVN points-to set that is a superset
+// of the NORM points-to set, then either it's a bug or it's a
+// result of this issue. If it's a result of this issue, then in
+// the offline constraint graph there should be a REF node inside
+// some cycle that reaches this node, and in the NORM solution the
+// pointer being dereferenced by that REF node should be the empty
+// set. If that isn't true then this is a bug. If it is true, then
+// you can further check that in the NORM solution the "extra"
+// points-to info in the HVN solution does in fact come from that
+// purported cycle (if it doesn't, then this is still a bug). If
+// you're doing the further check then you'll need to do it for
+// each "extra" points-to element in the HVN points-to set.
+//
+// There are probably ways to optimize these checks by taking
+// advantage of graph properties. For example, extraneous points-to
+// info will flow through the graph and end up in many
+// nodes. Rather than checking every node with extra info, you
+// could probably work out the "origin point" of the extra info and
+// just check there. Note that the check in the first bullet is
+// looking for soundness bugs, while the check in the second bullet
+// is looking for precision bugs; depending on your needs, you may
+// care more about one than the other.
+//
+// which we should evaluate. The cross-check is nonetheless invaluable
+// for all but one of the programs in the pointer_test suite.
+
+import (
+ "fmt"
+ "io"
+ "reflect"
+
+ "llvm.org/llgo/third_party/go.tools/container/intsets"
+ "llvm.org/llgo/third_party/go.tools/go/types"
+)
+
+// A peLabel is a pointer-equivalence label: two nodes with the same
+// peLabel have identical points-to solutions.
+//
+// The numbers are allocated consecutively like so:
+// 0 not a pointer
+// 1..N-1 addrConstraints (equals the constraint's .src field, hence sparse)
+// ... offsetAddr constraints
+// ... SCCs (with indirect nodes or multiple inputs)
+//
+// Each PE label denotes a set of pointers containing a single addr, a
+// single offsetAddr, or some set of other PE labels.
+//
+type peLabel int
+
+type hvn struct {
+ a *analysis
+ N int // len(a.nodes) immediately after constraint generation
+ log io.Writer // (optional) log of HVN lemmas
+ onodes []*onode // nodes of the offline graph
+ label peLabel // the next available PE label
+ hvnLabel map[string]peLabel // hash-value numbering (PE label) for each set of onodeids
+ stack []onodeid // DFS stack
+ index int32 // next onode.index, from Tarjan's SCC algorithm
+
+ // For each distinct offsetAddrConstraint (src, offset) pair,
+ // offsetAddrLabels records a unique PE label >= N.
+ offsetAddrLabels map[offsetAddr]peLabel
+}
+
+// The index of an onode in the offline graph.
+// (Currently the first N align with the main nodes,
+// but this may change with HRU.)
+type onodeid uint32
+
+// An onode is a node in the offline constraint graph.
+// (Where ambiguous, members of analysis.nodes are referred to as
+// "main graph" nodes.)
+//
+// Edges in the offline constraint graph (edges and implicit) point to
+// the source, i.e. against the flow of values: they are dependencies.
+// Implicit edges are used for SCC computation, but not for gathering
+// incoming labels.
+//
+type onode struct {
+ rep onodeid // index of representative of SCC in offline constraint graph
+
+ edges intsets.Sparse // constraint edges X-->Y (this onode is X)
+ implicit intsets.Sparse // implicit edges *X-->*Y (this onode is X)
+ peLabels intsets.Sparse // set of peLabels pointer-equivalent to this one
+ indirect bool // node has points-to relations not represented in graph
+
+ // Tarjan's SCC algorithm
+ index, lowlink int32 // Tarjan numbering
+ scc int32 // -ve => on stack; 0 => unvisited; +ve => node is root of a found SCC
+}
+
+type offsetAddr struct {
+ ptr nodeid
+ offset uint32
+}
+
+// nextLabel issues the next unused pointer-equivalence label.
+func (h *hvn) nextLabel() peLabel {
+ h.label++
+ return h.label
+}
+
+// ref(X) returns the index of the onode for *X.
+func (h *hvn) ref(id onodeid) onodeid {
+ return id + onodeid(len(h.a.nodes))
+}
+
+// hvn computes pointer-equivalence labels (peLabels) using the Hash-based
+// Value Numbering (HVN) algorithm described in Hardekopf & Lin, SAS'07.
+//
+func (a *analysis) hvn() {
+ start("HVN")
+
+ if a.log != nil {
+ fmt.Fprintf(a.log, "\n\n==== Pointer equivalence optimization\n\n")
+ }
+
+ h := hvn{
+ a: a,
+ N: len(a.nodes),
+ log: a.log,
+ hvnLabel: make(map[string]peLabel),
+ offsetAddrLabels: make(map[offsetAddr]peLabel),
+ }
+
+ if h.log != nil {
+ fmt.Fprintf(h.log, "\nCreating offline graph nodes...\n")
+ }
+
+ // Create offline nodes. The first N nodes correspond to main
+ // graph nodes; the next N are their corresponding ref() nodes.
+ h.onodes = make([]*onode, 2*h.N)
+ for id := range a.nodes {
+ id := onodeid(id)
+ h.onodes[id] = &onode{}
+ h.onodes[h.ref(id)] = &onode{indirect: true}
+ }
+
+ // Each node initially represents just itself.
+ for id, o := range h.onodes {
+ o.rep = onodeid(id)
+ }
+
+ h.markIndirectNodes()
+
+ // Reserve the first N PE labels for addrConstraints.
+ h.label = peLabel(h.N)
+
+ // Add offline constraint edges.
+ if h.log != nil {
+ fmt.Fprintf(h.log, "\nAdding offline graph edges...\n")
+ }
+ for _, c := range a.constraints {
+ if debugHVNVerbose && h.log != nil {
+ fmt.Fprintf(h.log, "; %s\n", c)
+ }
+ c.presolve(&h)
+ }
+
+ // Find and collapse SCCs.
+ if h.log != nil {
+ fmt.Fprintf(h.log, "\nFinding SCCs...\n")
+ }
+ h.index = 1
+ for id, o := range h.onodes {
+ if id > 0 && o.index == 0 {
+ // Start depth-first search at each unvisited node.
+ h.visit(onodeid(id))
+ }
+ }
+
+ // Dump the solution
+ // (NB: somewhat redundant with logging from simplify().)
+ if debugHVNVerbose && h.log != nil {
+ fmt.Fprintf(h.log, "\nPointer equivalences:\n")
+ for id, o := range h.onodes {
+ if id == 0 {
+ continue
+ }
+ if id == int(h.N) {
+ fmt.Fprintf(h.log, "---\n")
+ }
+ fmt.Fprintf(h.log, "o%d\t", id)
+ if o.rep != onodeid(id) {
+ fmt.Fprintf(h.log, "rep=o%d", o.rep)
+ } else {
+ fmt.Fprintf(h.log, "p%d", o.peLabels.Min())
+ if o.indirect {
+ fmt.Fprint(h.log, " indirect")
+ }
+ }
+ fmt.Fprintln(h.log)
+ }
+ }
+
+ // Simplify the main constraint graph
+ h.simplify()
+
+ a.showCounts()
+
+ stop("HVN")
+}
+
+// ---- constraint-specific rules ----
+
+// dst := &src
+func (c *addrConstraint) presolve(h *hvn) {
+ // Each object (src) is an initial PE label.
+ label := peLabel(c.src) // label < N
+ if debugHVNVerbose && h.log != nil {
+ // duplicate log messages are possible
+ fmt.Fprintf(h.log, "\tcreate p%d: {&n%d}\n", label, c.src)
+ }
+ odst := onodeid(c.dst)
+ osrc := onodeid(c.src)
+
+ // Assign dst this label.
+ h.onodes[odst].peLabels.Insert(int(label))
+ if debugHVNVerbose && h.log != nil {
+ fmt.Fprintf(h.log, "\to%d has p%d\n", odst, label)
+ }
+
+ h.addImplicitEdge(h.ref(odst), osrc) // *dst ~~> src.
+}
+
+// dst = src
+func (c *copyConstraint) presolve(h *hvn) {
+ odst := onodeid(c.dst)
+ osrc := onodeid(c.src)
+ h.addEdge(odst, osrc) // dst --> src
+ h.addImplicitEdge(h.ref(odst), h.ref(osrc)) // *dst ~~> *src
+}
+
+// dst = *src + offset
+func (c *loadConstraint) presolve(h *hvn) {
+ odst := onodeid(c.dst)
+ osrc := onodeid(c.src)
+ if c.offset == 0 {
+ h.addEdge(odst, h.ref(osrc)) // dst --> *src
+ } else {
+ // We don't interpret load-with-offset, e.g. results
+ // of map value lookup, R-block of dynamic call, slice
+ // copy/append, reflection.
+ h.markIndirect(odst, "load with offset")
+ }
+}
+
+// *dst + offset = src
+func (c *storeConstraint) presolve(h *hvn) {
+ odst := onodeid(c.dst)
+ osrc := onodeid(c.src)
+ if c.offset == 0 {
+ h.onodes[h.ref(odst)].edges.Insert(int(osrc)) // *dst --> src
+ if debugHVNVerbose && h.log != nil {
+ fmt.Fprintf(h.log, "\to%d --> o%d\n", h.ref(odst), osrc)
+ }
+ } else {
+ // We don't interpret store-with-offset.
+ // See discussion of soundness at markIndirectNodes.
+ }
+}
+
+// dst = &src.offset
+func (c *offsetAddrConstraint) presolve(h *hvn) {
+ // Give each distinct (addr, offset) pair a fresh PE label.
+ // The cache performs CSE, effectively.
+ key := offsetAddr{c.src, c.offset}
+ label, ok := h.offsetAddrLabels[key]
+ if !ok {
+ label = h.nextLabel()
+ h.offsetAddrLabels[key] = label
+ if debugHVNVerbose && h.log != nil {
+ fmt.Fprintf(h.log, "\tcreate p%d: {&n%d.#%d}\n",
+ label, c.src, c.offset)
+ }
+ }
+
+ // Assign dst this label.
+ h.onodes[c.dst].peLabels.Insert(int(label))
+ if debugHVNVerbose && h.log != nil {
+ fmt.Fprintf(h.log, "\to%d has p%d\n", c.dst, label)
+ }
+}
+
+// dst = src.(typ) where typ is an interface
+func (c *typeFilterConstraint) presolve(h *hvn) {
+ h.markIndirect(onodeid(c.dst), "typeFilter result")
+}
+
+// dst = src.(typ) where typ is concrete
+func (c *untagConstraint) presolve(h *hvn) {
+ odst := onodeid(c.dst)
+ for end := odst + onodeid(h.a.sizeof(c.typ)); odst < end; odst++ {
+ h.markIndirect(odst, "untag result")
+ }
+}
+
+// dst = src.method(c.params...)
+func (c *invokeConstraint) presolve(h *hvn) {
+ // All methods are address-taken functions, so
+ // their formal P-blocks were already marked indirect.
+
+ // Mark the caller's targets node as indirect.
+ sig := c.method.Type().(*types.Signature)
+ id := c.params
+ h.markIndirect(onodeid(c.params), "invoke targets node")
+ id++
+
+ id += nodeid(h.a.sizeof(sig.Params()))
+
+ // Mark the caller's R-block as indirect.
+ end := id + nodeid(h.a.sizeof(sig.Results()))
+ for id < end {
+ h.markIndirect(onodeid(id), "invoke R-block")
+ id++
+ }
+}
+
+// markIndirectNodes marks as indirect nodes whose points-to relations
+// are not entirely captured by the offline graph, including:
+//
+// (a) All address-taken nodes (including the following nodes within
+// the same object). This is described in the paper.
+//
+// The most subtle cause of indirect nodes is the generation of
+// store-with-offset constraints since the offline graph doesn't
+// represent them. A global audit of constraint generation reveals the
+// following uses of store-with-offset:
+//
+// (b) genDynamicCall, for P-blocks of dynamically called functions,
+ // to which dynamic copy edges will be added during
+// solving: from storeConstraint for standalone functions,
+// and from invokeConstraint for methods.
+// All such P-blocks must be marked indirect.
+ // (c) MapUpdate, to update the value part of a map object.
+ // All MakeMap objects' value parts must be marked indirect.
+// (d) copyElems, to update the destination array.
+// All array elements must be marked indirect.
+//
+// Not all indirect marking happens here. ref() nodes are marked
+// indirect at construction, and each constraint's presolve() method may
+// mark additional nodes.
+//
+func (h *hvn) markIndirectNodes() {
+ // (a) all address-taken nodes, plus all nodes following them
+ // within the same object, since these may be indirectly
+ // stored or address-taken.
+ for _, c := range h.a.constraints {
+ if c, ok := c.(*addrConstraint); ok {
+ start := h.a.enclosingObj(c.src)
+ end := start + nodeid(h.a.nodes[start].obj.size)
+ for id := c.src; id < end; id++ {
+ h.markIndirect(onodeid(id), "A-T object")
+ }
+ }
+ }
+
+ // (b) P-blocks of all address-taken functions.
+ for id := 0; id < h.N; id++ {
+ obj := h.a.nodes[id].obj
+
+ // TODO(adonovan): opt: if obj.cgn.fn is a method and
+ // obj.cgn is not its shared contour, this is an
+ // "inlined" static method call. We needn't consider it
+ // address-taken since no invokeConstraint will affect it.
+
+ if obj != nil && obj.flags&otFunction != 0 && h.a.atFuncs[obj.cgn.fn] {
+ // address-taken function
+ if debugHVNVerbose && h.log != nil {
+ fmt.Fprintf(h.log, "n%d is address-taken: %s\n", id, obj.cgn.fn)
+ }
+ h.markIndirect(onodeid(id), "A-T func identity")
+ id++
+ sig := obj.cgn.fn.Signature
+ psize := h.a.sizeof(sig.Params())
+ if sig.Recv() != nil {
+ psize += h.a.sizeof(sig.Recv().Type())
+ }
+ for end := id + int(psize); id < end; id++ {
+ h.markIndirect(onodeid(id), "A-T func P-block")
+ }
+ id--
+ continue
+ }
+ }
+
+ // (c) all map objects' value fields.
+ for _, id := range h.a.mapValues {
+ h.markIndirect(onodeid(id), "makemap.value")
+ }
+
+ // (d) all array element objects.
+ // TODO(adonovan): opt: can we do better?
+ for id := 0; id < h.N; id++ {
+ // Identity node for an object of array type?
+ if tArray, ok := h.a.nodes[id].typ.(*types.Array); ok {
+ // Mark the array element nodes indirect.
+ // (Skip past the identity field.)
+ for _ = range h.a.flatten(tArray.Elem()) {
+ id++
+ h.markIndirect(onodeid(id), "array elem")
+ }
+ }
+ }
+}
+
+func (h *hvn) markIndirect(oid onodeid, comment string) {
+ h.onodes[oid].indirect = true
+ if debugHVNVerbose && h.log != nil {
+ fmt.Fprintf(h.log, "\to%d is indirect: %s\n", oid, comment)
+ }
+}
+
+// Adds an edge dst-->src.
+// Note the unusual convention: edges are dependency (contraflow) edges.
+func (h *hvn) addEdge(odst, osrc onodeid) {
+ h.onodes[odst].edges.Insert(int(osrc))
+ if debugHVNVerbose && h.log != nil {
+ fmt.Fprintf(h.log, "\to%d --> o%d\n", odst, osrc)
+ }
+}
+
+func (h *hvn) addImplicitEdge(odst, osrc onodeid) {
+ h.onodes[odst].implicit.Insert(int(osrc))
+ if debugHVNVerbose && h.log != nil {
+ fmt.Fprintf(h.log, "\to%d ~~> o%d\n", odst, osrc)
+ }
+}
+
+// visit implements the depth-first search of Tarjan's SCC algorithm.
+// Precondition: x is canonical.
+func (h *hvn) visit(x onodeid) {
+ h.checkCanonical(x)
+ xo := h.onodes[x]
+ xo.index = h.index
+ xo.lowlink = h.index
+ h.index++
+
+ h.stack = append(h.stack, x) // push
+ assert(xo.scc == 0, "node revisited")
+ xo.scc = -1
+
+ var deps []int
+ deps = xo.edges.AppendTo(deps)
+ deps = xo.implicit.AppendTo(deps)
+
+ for _, y := range deps {
+ // Loop invariant: x is canonical.
+
+ y := h.find(onodeid(y))
+
+ if x == y {
+ continue // nodes already coalesced
+ }
+
+ xo := h.onodes[x]
+ yo := h.onodes[y]
+
+ switch {
+ case yo.scc > 0:
+ // y is already a collapsed SCC
+
+ case yo.scc < 0:
+ // y is on the stack, and thus in the current SCC.
+ if yo.index < xo.lowlink {
+ xo.lowlink = yo.index
+ }
+
+ default:
+ // y is unvisited; visit it now.
+ h.visit(y)
+ // Note: x and y are now non-canonical.
+
+ x = h.find(onodeid(x))
+
+ if yo.lowlink < xo.lowlink {
+ xo.lowlink = yo.lowlink
+ }
+ }
+ }
+ h.checkCanonical(x)
+
+ // Is x the root of an SCC?
+ if xo.lowlink == xo.index {
+ // Coalesce all nodes in the SCC.
+ if debugHVNVerbose && h.log != nil {
+ fmt.Fprintf(h.log, "scc o%d\n", x)
+ }
+ for {
+ // Pop y from stack.
+ i := len(h.stack) - 1
+ y := h.stack[i]
+ h.stack = h.stack[:i]
+
+ h.checkCanonical(x)
+ xo := h.onodes[x]
+ h.checkCanonical(y)
+ yo := h.onodes[y]
+
+ if xo == yo {
+ // SCC is complete.
+ xo.scc = 1
+ h.labelSCC(x)
+ break
+ }
+ h.coalesce(x, y)
+ }
+ }
+}
+
+// Precondition: x is canonical.
+func (h *hvn) labelSCC(x onodeid) {
+ h.checkCanonical(x)
+ xo := h.onodes[x]
+ xpe := &xo.peLabels
+
+ // All indirect nodes get new labels.
+ if xo.indirect {
+ label := h.nextLabel()
+ if debugHVNVerbose && h.log != nil {
+ fmt.Fprintf(h.log, "\tcreate p%d: indirect SCC\n", label)
+ fmt.Fprintf(h.log, "\to%d has p%d\n", x, label)
+ }
+
+ // Remove pre-labeling, in case a direct pre-labeled node was
+ // merged with an indirect one.
+ xpe.Clear()
+ xpe.Insert(int(label))
+
+ return
+ }
+
+ // Invariant: all peLabels sets are non-empty.
+ // Those that are logically empty contain zero as their sole element.
+ // No other set contains zero.
+
+ // Find all labels coming in to the coalesced SCC node.
+ for _, y := range xo.edges.AppendTo(nil) {
+ y := h.find(onodeid(y))
+ if y == x {
+ continue // already coalesced
+ }
+ ype := &h.onodes[y].peLabels
+ if debugHVNVerbose && h.log != nil {
+ fmt.Fprintf(h.log, "\tedge from o%d = %s\n", y, ype)
+ }
+
+ if ype.IsEmpty() {
+ if debugHVNVerbose && h.log != nil {
+ fmt.Fprintf(h.log, "\tnode has no PE label\n")
+ }
+ }
+ assert(!ype.IsEmpty(), "incoming node has no PE label")
+
+ if ype.Has(0) {
+ // {0} represents a non-pointer.
+ assert(ype.Len() == 1, "PE set contains {0, ...}")
+ } else {
+ xpe.UnionWith(ype)
+ }
+ }
+
+ switch xpe.Len() {
+ case 0:
+ // SCC has no incoming non-zero PE labels: it is a non-pointer.
+ xpe.Insert(0)
+
+ case 1:
+ // already a singleton
+
+ default:
+ // SCC has multiple incoming non-zero PE labels.
+ // Find the canonical label representing this set.
+ // We use String() as a fingerprint consistent with Equals().
+ key := xpe.String()
+ label, ok := h.hvnLabel[key]
+ if !ok {
+ label = h.nextLabel()
+ if debugHVNVerbose && h.log != nil {
+ fmt.Fprintf(h.log, "\tcreate p%d: union %s\n", label, xpe.String())
+ }
+ h.hvnLabel[key] = label
+ }
+ xpe.Clear()
+ xpe.Insert(int(label))
+ }
+
+ if debugHVNVerbose && h.log != nil {
+ fmt.Fprintf(h.log, "\to%d has p%d\n", x, xpe.Min())
+ }
+}
+
+// coalesce combines two nodes in the offline constraint graph.
+// Precondition: x and y are canonical.
+func (h *hvn) coalesce(x, y onodeid) {
+ xo := h.onodes[x]
+ yo := h.onodes[y]
+
+ // x becomes y's canonical representative.
+ yo.rep = x
+
+ if debugHVNVerbose && h.log != nil {
+ fmt.Fprintf(h.log, "\tcoalesce o%d into o%d\n", y, x)
+ }
+
+ // x accumulates y's edges.
+ xo.edges.UnionWith(&yo.edges)
+ yo.edges.Clear()
+
+ // x accumulates y's implicit edges.
+ xo.implicit.UnionWith(&yo.implicit)
+ yo.implicit.Clear()
+
+ // x accumulates y's pointer-equivalence labels.
+ xo.peLabels.UnionWith(&yo.peLabels)
+ yo.peLabels.Clear()
+
+ // x accumulates y's indirect flag.
+ if yo.indirect {
+ xo.indirect = true
+ }
+}
+
+// simplify computes a degenerate renumbering of nodeids from the PE
+// labels assigned by the hvn, and uses it to simplify the main
+// constraint graph, eliminating non-pointer nodes and duplicate
+// constraints.
+//
+func (h *hvn) simplify() {
+ // canon maps each peLabel to its canonical main node.
+ canon := make([]nodeid, h.label)
+ for i := range canon {
+ canon[i] = nodeid(h.N) // indicates "unset"
+ }
+
+ // mapping maps each main node index to the index of the canonical node.
+ mapping := make([]nodeid, len(h.a.nodes))
+
+ for id := range h.a.nodes {
+ id := nodeid(id)
+ if id == 0 {
+ canon[0] = 0
+ mapping[0] = 0
+ continue
+ }
+ oid := h.find(onodeid(id))
+ peLabels := &h.onodes[oid].peLabels
+ assert(peLabels.Len() == 1, "PE class is not a singleton")
+ label := peLabel(peLabels.Min())
+
+ canonId := canon[label]
+ if canonId == nodeid(h.N) {
+ // id becomes the representative of the PE label.
+ canonId = id
+ canon[label] = canonId
+
+ if h.a.log != nil {
+ fmt.Fprintf(h.a.log, "\tpts(n%d) is canonical : \t(%s)\n",
+ id, h.a.nodes[id].typ)
+ }
+
+ } else {
+ // Link the solver states for the two nodes.
+ assert(h.a.nodes[canonId].solve != nil, "missing solver state")
+ h.a.nodes[id].solve = h.a.nodes[canonId].solve
+
+ if h.a.log != nil {
+ // TODO(adonovan): debug: reorganize the log so it prints
+ // one line:
+ // pe y = x1, ..., xn
+ // for each canonical y. Requires allocation.
+ fmt.Fprintf(h.a.log, "\tpts(n%d) = pts(n%d) : %s\n",
+ id, canonId, h.a.nodes[id].typ)
+ }
+ }
+
+ mapping[id] = canonId
+ }
+
+ // Renumber the constraints, eliminate duplicates, and eliminate
+ // any containing non-pointers (n0).
+ addrs := make(map[addrConstraint]bool)
+ copys := make(map[copyConstraint]bool)
+ loads := make(map[loadConstraint]bool)
+ stores := make(map[storeConstraint]bool)
+ offsetAddrs := make(map[offsetAddrConstraint]bool)
+ untags := make(map[untagConstraint]bool)
+ typeFilters := make(map[typeFilterConstraint]bool)
+ invokes := make(map[invokeConstraint]bool)
+
+ nbefore := len(h.a.constraints)
+ cc := h.a.constraints[:0] // in-situ compaction
+ for _, c := range h.a.constraints {
+ // Renumber.
+ switch c := c.(type) {
+ case *addrConstraint:
+ // Don't renumber c.src since it is the label of
+ // an addressable object and will appear in PT sets.
+ c.dst = mapping[c.dst]
+ default:
+ c.renumber(mapping)
+ }
+
+ if c.ptr() == 0 {
+ continue // skip: constraint attached to non-pointer
+ }
+
+ var dup bool
+ switch c := c.(type) {
+ case *addrConstraint:
+ _, dup = addrs[*c]
+ addrs[*c] = true
+
+ case *copyConstraint:
+ if c.src == c.dst {
+ continue // skip degenerate copies
+ }
+ if c.src == 0 {
+ continue // skip copy from non-pointer
+ }
+ _, dup = copys[*c]
+ copys[*c] = true
+
+ case *loadConstraint:
+ if c.src == 0 {
+ continue // skip load from non-pointer
+ }
+ _, dup = loads[*c]
+ loads[*c] = true
+
+ case *storeConstraint:
+ if c.src == 0 {
+ continue // skip store from non-pointer
+ }
+ _, dup = stores[*c]
+ stores[*c] = true
+
+ case *offsetAddrConstraint:
+ if c.src == 0 {
+ continue // skip offset from non-pointer
+ }
+ _, dup = offsetAddrs[*c]
+ offsetAddrs[*c] = true
+
+ case *untagConstraint:
+ if c.src == 0 {
+ continue // skip untag of non-pointer
+ }
+ _, dup = untags[*c]
+ untags[*c] = true
+
+ case *typeFilterConstraint:
+ if c.src == 0 {
+ continue // skip filter of non-pointer
+ }
+ _, dup = typeFilters[*c]
+ typeFilters[*c] = true
+
+ case *invokeConstraint:
+ if c.params == 0 {
+ panic("non-pointer invoke.params")
+ }
+ if c.iface == 0 {
+ continue // skip invoke on non-pointer
+ }
+ _, dup = invokes[*c]
+ invokes[*c] = true
+
+ default:
+ // We don't bother de-duping advanced constraints
+ // (e.g. reflection) since they are uncommon.
+
+ // Eliminate constraints containing non-pointer nodeids.
+ //
+ // We use reflection to find the fields to avoid
+ // adding yet another method to constraint.
+ //
+ // TODO(adonovan): experiment with a constraint
+ // method that returns a slice of pointers to
+ // nodeids fields to enable uniform iteration;
+ // the renumber() method could be removed and
+ // implemented using the new one.
+ //
+ // TODO(adonovan): opt: this is unsound since
+ // some constraints still have an effect if one
+ // of the operands is zero: rVCall, rVMapIndex,
+ // rvSetMapIndex. Handle them specially.
+ rtNodeid := reflect.TypeOf(nodeid(0))
+ x := reflect.ValueOf(c).Elem()
+ for i, nf := 0, x.NumField(); i < nf; i++ {
+ f := x.Field(i)
+ if f.Type() == rtNodeid {
+ if f.Uint() == 0 {
+ dup = true // skip it
+ break
+ }
+ }
+ }
+ }
+ if dup {
+ continue // skip duplicates
+ }
+
+ cc = append(cc, c)
+ }
+ h.a.constraints = cc
+
+ if h.log != nil {
+ fmt.Fprintf(h.log, "#constraints: was %d, now %d\n", nbefore, len(h.a.constraints))
+ }
+}
+
+// find returns the canonical onodeid for x.
+// (The onodes form a disjoint set forest.)
+func (h *hvn) find(x onodeid) onodeid {
+ // TODO(adonovan): opt: this is a CPU hotspot. Try "union by rank".
+ xo := h.onodes[x]
+ rep := xo.rep
+ if rep != x {
+ rep = h.find(rep) // simple path compression
+ xo.rep = rep
+ }
+ return rep
+}
+
+func (h *hvn) checkCanonical(x onodeid) {
+ if debugHVN {
+ assert(x == h.find(x), "not canonical")
+ }
+}
+
+func assert(p bool, msg string) {
+ if debugHVN && !p {
+ panic("assertion failed: " + msg)
+ }
+}
diff --git a/llgo/third_party/go.tools/go/pointer/intrinsics.go b/llgo/third_party/go.tools/go/pointer/intrinsics.go
new file mode 100644
index 0000000000000000000000000000000000000000..866cd0b37bc401149cc284a16c339c4226716570
--- /dev/null
+++ b/llgo/third_party/go.tools/go/pointer/intrinsics.go
@@ -0,0 +1,380 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pointer
+
+// This package defines the treatment of intrinsics, i.e. library
+// functions requiring special analytical treatment.
+//
+// Most of these are C or assembly functions, but even some Go
+ // functions may require special treatment if the analysis completely
+// replaces the implementation of an API such as reflection.
+
+// TODO(adonovan): support a means of writing analytic summaries in
+// the target code, so that users can summarise the effects of their
+// own C functions using a snippet of Go.
+
+import (
+ "fmt"
+
+ "llvm.org/llgo/third_party/go.tools/go/ssa"
+ "llvm.org/llgo/third_party/go.tools/go/types"
+)
+
+// Instances of 'intrinsic' generate analysis constraints for calls to
+// intrinsic functions.
+// Implementations may exploit information from the calling site
+// via cgn.callersite; for shared contours this is nil.
+type intrinsic func(a *analysis, cgn *cgnode)
+
+// Initialized in explicit init() to defeat (spurious) initialization
+// cycle error.
+var intrinsicsByName = make(map[string]intrinsic)
+
+func init() {
+ // Key strings are from Function.String().
+ // That little dot ۰ is an Arabic zero numeral (U+06F0),
+ // category [Nd].
+ for name, fn := range map[string]intrinsic{
+ // Other packages.
+ "bytes.Equal": ext۰NoEffect,
+ "bytes.IndexByte": ext۰NoEffect,
+ "crypto/aes.decryptBlockAsm": ext۰NoEffect,
+ "crypto/aes.encryptBlockAsm": ext۰NoEffect,
+ "crypto/aes.expandKeyAsm": ext۰NoEffect,
+ "crypto/aes.hasAsm": ext۰NoEffect,
+ "crypto/md5.block": ext۰NoEffect,
+ "crypto/rc4.xorKeyStream": ext۰NoEffect,
+ "crypto/sha1.block": ext۰NoEffect,
+ "crypto/sha256.block": ext۰NoEffect,
+ "hash/crc32.castagnoliSSE42": ext۰NoEffect,
+ "hash/crc32.haveSSE42": ext۰NoEffect,
+ "math.Abs": ext۰NoEffect,
+ "math.Acos": ext۰NoEffect,
+ "math.Asin": ext۰NoEffect,
+ "math.Atan": ext۰NoEffect,
+ "math.Atan2": ext۰NoEffect,
+ "math.Ceil": ext۰NoEffect,
+ "math.Cos": ext۰NoEffect,
+ "math.Dim": ext۰NoEffect,
+ "math.Exp": ext۰NoEffect,
+ "math.Exp2": ext۰NoEffect,
+ "math.Expm1": ext۰NoEffect,
+ "math.Float32bits": ext۰NoEffect,
+ "math.Float32frombits": ext۰NoEffect,
+ "math.Float64bits": ext۰NoEffect,
+ "math.Float64frombits": ext۰NoEffect,
+ "math.Floor": ext۰NoEffect,
+ "math.Frexp": ext۰NoEffect,
+ "math.Hypot": ext۰NoEffect,
+ "math.Ldexp": ext۰NoEffect,
+ "math.Log": ext۰NoEffect,
+ "math.Log10": ext۰NoEffect,
+ "math.Log1p": ext۰NoEffect,
+ "math.Log2": ext۰NoEffect,
+ "math.Max": ext۰NoEffect,
+ "math.Min": ext۰NoEffect,
+ "math.Mod": ext۰NoEffect,
+ "math.Modf": ext۰NoEffect,
+ "math.Remainder": ext۰NoEffect,
+ "math.Sin": ext۰NoEffect,
+ "math.Sincos": ext۰NoEffect,
+ "math.Sqrt": ext۰NoEffect,
+ "math.Tan": ext۰NoEffect,
+ "math.Trunc": ext۰NoEffect,
+ "math/big.addMulVVW": ext۰NoEffect,
+ "math/big.addVV": ext۰NoEffect,
+ "math/big.addVW": ext۰NoEffect,
+ "math/big.bitLen": ext۰NoEffect,
+ "math/big.divWVW": ext۰NoEffect,
+ "math/big.divWW": ext۰NoEffect,
+ "math/big.mulAddVWW": ext۰NoEffect,
+ "math/big.mulWW": ext۰NoEffect,
+ "math/big.shlVU": ext۰NoEffect,
+ "math/big.shrVU": ext۰NoEffect,
+ "math/big.subVV": ext۰NoEffect,
+ "math/big.subVW": ext۰NoEffect,
+ "net.runtime_Semacquire": ext۰NoEffect,
+ "net.runtime_Semrelease": ext۰NoEffect,
+ "net.runtime_pollClose": ext۰NoEffect,
+ "net.runtime_pollOpen": ext۰NoEffect,
+ "net.runtime_pollReset": ext۰NoEffect,
+ "net.runtime_pollServerInit": ext۰NoEffect,
+ "net.runtime_pollSetDeadline": ext۰NoEffect,
+ "net.runtime_pollUnblock": ext۰NoEffect,
+ "net.runtime_pollWait": ext۰NoEffect,
+ "net.runtime_pollWaitCanceled": ext۰NoEffect,
+ "os.epipecheck": ext۰NoEffect,
+ "runtime.BlockProfile": ext۰NoEffect,
+ "runtime.Breakpoint": ext۰NoEffect,
+ "runtime.CPUProfile": ext۰NoEffect, // good enough
+ "runtime.Caller": ext۰NoEffect,
+ "runtime.Callers": ext۰NoEffect, // good enough
+ "runtime.FuncForPC": ext۰NoEffect,
+ "runtime.GC": ext۰NoEffect,
+ "runtime.GOMAXPROCS": ext۰NoEffect,
+ "runtime.Goexit": ext۰NoEffect,
+ "runtime.GoroutineProfile": ext۰NoEffect,
+ "runtime.Gosched": ext۰NoEffect,
+ "runtime.MemProfile": ext۰NoEffect,
+ "runtime.NumCPU": ext۰NoEffect,
+ "runtime.NumGoroutine": ext۰NoEffect,
+ "runtime.ReadMemStats": ext۰NoEffect,
+ "runtime.SetBlockProfileRate": ext۰NoEffect,
+ "runtime.SetCPUProfileRate": ext۰NoEffect,
+ "runtime.SetFinalizer": ext۰runtime۰SetFinalizer,
+ "runtime.Stack": ext۰NoEffect,
+ "runtime.ThreadCreateProfile": ext۰NoEffect,
+ "runtime.cstringToGo": ext۰NoEffect,
+ "runtime.funcentry_go": ext۰NoEffect,
+ "runtime.funcline_go": ext۰NoEffect,
+ "runtime.funcname_go": ext۰NoEffect,
+ "runtime.getgoroot": ext۰NoEffect,
+ "runtime/pprof.runtime_cyclesPerSecond": ext۰NoEffect,
+ "strings.IndexByte": ext۰NoEffect,
+ "sync.runtime_Semacquire": ext۰NoEffect,
+ "sync.runtime_Semrelease": ext۰NoEffect,
+ "sync.runtime_Syncsemacquire": ext۰NoEffect,
+ "sync.runtime_Syncsemcheck": ext۰NoEffect,
+ "sync.runtime_Syncsemrelease": ext۰NoEffect,
+ "sync.runtime_procPin": ext۰NoEffect,
+ "sync.runtime_procUnpin": ext۰NoEffect,
+ "sync.runtime_registerPool": ext۰NoEffect,
+ "sync/atomic.AddInt32": ext۰NoEffect,
+ "sync/atomic.AddInt64": ext۰NoEffect,
+ "sync/atomic.AddUint32": ext۰NoEffect,
+ "sync/atomic.AddUint64": ext۰NoEffect,
+ "sync/atomic.AddUintptr": ext۰NoEffect,
+ "sync/atomic.CompareAndSwapInt32": ext۰NoEffect,
+ "sync/atomic.CompareAndSwapUint32": ext۰NoEffect,
+ "sync/atomic.CompareAndSwapUint64": ext۰NoEffect,
+ "sync/atomic.CompareAndSwapUintptr": ext۰NoEffect,
+ "sync/atomic.LoadInt32": ext۰NoEffect,
+ "sync/atomic.LoadInt64": ext۰NoEffect,
+ "sync/atomic.LoadPointer": ext۰NoEffect, // ignore unsafe.Pointers
+ "sync/atomic.LoadUint32": ext۰NoEffect,
+ "sync/atomic.LoadUint64": ext۰NoEffect,
+ "sync/atomic.LoadUintptr": ext۰NoEffect,
+ "sync/atomic.StoreInt32": ext۰NoEffect,
+ "sync/atomic.StorePointer": ext۰NoEffect, // ignore unsafe.Pointers
+ "sync/atomic.StoreUint32": ext۰NoEffect,
+ "sync/atomic.StoreUintptr": ext۰NoEffect,
+ "syscall.Close": ext۰NoEffect,
+ "syscall.Exit": ext۰NoEffect,
+ "syscall.Getpid": ext۰NoEffect,
+ "syscall.Getwd": ext۰NoEffect,
+ "syscall.Kill": ext۰NoEffect,
+ "syscall.RawSyscall": ext۰NoEffect,
+ "syscall.RawSyscall6": ext۰NoEffect,
+ "syscall.Syscall": ext۰NoEffect,
+ "syscall.Syscall6": ext۰NoEffect,
+ "syscall.runtime_AfterFork": ext۰NoEffect,
+ "syscall.runtime_BeforeFork": ext۰NoEffect,
+ "syscall.setenv_c": ext۰NoEffect,
+ "time.Sleep": ext۰NoEffect,
+ "time.now": ext۰NoEffect,
+ "time.startTimer": ext۰time۰startTimer,
+ "time.stopTimer": ext۰NoEffect,
+ } {
+ intrinsicsByName[name] = fn
+ }
+}
+
+// findIntrinsic returns the constraint generation function for an
+// intrinsic function fn, or nil if the function should be handled normally.
+//
+func (a *analysis) findIntrinsic(fn *ssa.Function) intrinsic {
+ // Consult the *Function-keyed cache.
+ // A cached nil indicates a normal non-intrinsic function.
+ impl, ok := a.intrinsics[fn]
+ if !ok {
+ impl = intrinsicsByName[fn.String()] // may be nil
+
+ if a.isReflect(fn) {
+ if !a.config.Reflection {
+ impl = ext۰NoEffect // reflection disabled
+ } else if impl == nil {
+ // Ensure all "reflect" code is treated intrinsically.
+ impl = ext۰NotYetImplemented
+ }
+ }
+
+ a.intrinsics[fn] = impl
+ }
+ return impl
+}
+
+// isReflect reports whether fn belongs to the "reflect" package.
+func (a *analysis) isReflect(fn *ssa.Function) bool {
+ if a.reflectValueObj == nil {
+ return false // "reflect" package not loaded
+ }
+ reflectPackage := a.reflectValueObj.Pkg()
+ if fn.Pkg != nil && fn.Pkg.Object == reflectPackage {
+ return true
+ }
+ // Synthetic wrappers have a nil Pkg, so they slip through the
+ // previous check. Check the receiver package.
+ // TODO(adonovan): should synthetic wrappers have a non-nil Pkg?
+ if recv := fn.Signature.Recv(); recv != nil {
+ if named, ok := deref(recv.Type()).(*types.Named); ok {
+ if named.Obj().Pkg() == reflectPackage {
+ return true // e.g. wrapper of (reflect.Value).f
+ }
+ }
+ }
+ return false
+}
+
+// A trivial intrinsic suitable for any function that does not:
+// 1) induce aliases between its arguments or any global variables;
+// 2) call any functions; or
+// 3) create any labels.
+//
+// Many intrinsics (such as CompareAndSwapInt32) have a fourth kind of
+// effect: loading or storing through a pointer. Though these could
+// be significant, we deliberately ignore them because they are
+// generally not worth the effort.
+//
+// We sometimes violate condition #3 if the function creates only
+// non-function labels, as the control-flow graph is still sound.
+//
+func ext۰NoEffect(a *analysis, cgn *cgnode) {}
+
+func ext۰NotYetImplemented(a *analysis, cgn *cgnode) {
+ fn := cgn.fn
+ a.warnf(fn.Pos(), "unsound: intrinsic treatment of %s not yet implemented", fn)
+}
+
+// ---------- func runtime.SetFinalizer(x, f interface{}) ----------
+
+// runtime.SetFinalizer(x, f)
+type runtimeSetFinalizerConstraint struct {
+ targets nodeid // (indirect)
+ f nodeid // (ptr)
+ x nodeid
+}
+
+func (c *runtimeSetFinalizerConstraint) ptr() nodeid { return c.f }
+func (c *runtimeSetFinalizerConstraint) presolve(h *hvn) {
+ h.markIndirect(onodeid(c.targets), "SetFinalizer.targets")
+}
+func (c *runtimeSetFinalizerConstraint) renumber(mapping []nodeid) {
+ c.targets = mapping[c.targets]
+ c.f = mapping[c.f]
+ c.x = mapping[c.x]
+}
+
+func (c *runtimeSetFinalizerConstraint) String() string {
+ return fmt.Sprintf("runtime.SetFinalizer(n%d, n%d)", c.x, c.f)
+}
+
+func (c *runtimeSetFinalizerConstraint) solve(a *analysis, delta *nodeset) {
+ for _, fObj := range delta.AppendTo(a.deltaSpace) {
+ tDyn, f, indirect := a.taggedValue(nodeid(fObj))
+ if indirect {
+ // TODO(adonovan): we'll need to implement this
+ // when we start creating indirect tagged objects.
+ panic("indirect tagged object")
+ }
+
+ tSig, ok := tDyn.Underlying().(*types.Signature)
+ if !ok {
+ continue // not a function
+ }
+ if tSig.Recv() != nil {
+ panic(tSig)
+ }
+ if tSig.Params().Len() != 1 {
+ continue // not a unary function
+ }
+
+ // Extract x to tmp.
+ tx := tSig.Params().At(0).Type()
+ tmp := a.addNodes(tx, "SetFinalizer.tmp")
+ a.typeAssert(tx, tmp, c.x, false)
+
+ // Call f(tmp).
+ a.store(f, tmp, 1, a.sizeof(tx))
+
+ // Add dynamic call target.
+ if a.onlineCopy(c.targets, f) {
+ a.addWork(c.targets)
+ }
+ }
+}
+
+func ext۰runtime۰SetFinalizer(a *analysis, cgn *cgnode) {
+ // This is the shared contour, used for dynamic calls.
+ targets := a.addOneNode(tInvalid, "SetFinalizer.targets", nil)
+ cgn.sites = append(cgn.sites, &callsite{targets: targets})
+ params := a.funcParams(cgn.obj)
+ a.addConstraint(&runtimeSetFinalizerConstraint{
+ targets: targets,
+ x: params,
+ f: params + 1,
+ })
+}
+
+// ---------- func time.startTimer(t *runtimeTimer) ----------
+
+// time.StartTimer(t)
+type timeStartTimerConstraint struct {
+ targets nodeid // (indirect)
+ t nodeid // (ptr)
+}
+
+func (c *timeStartTimerConstraint) ptr() nodeid { return c.t }
+func (c *timeStartTimerConstraint) presolve(h *hvn) {
+ h.markIndirect(onodeid(c.targets), "StartTimer.targets")
+}
+func (c *timeStartTimerConstraint) renumber(mapping []nodeid) {
+ c.targets = mapping[c.targets]
+ c.t = mapping[c.t]
+}
+
+func (c *timeStartTimerConstraint) String() string {
+ return fmt.Sprintf("time.startTimer(n%d)", c.t)
+}
+
+func (c *timeStartTimerConstraint) solve(a *analysis, delta *nodeset) {
+ for _, tObj := range delta.AppendTo(a.deltaSpace) {
+ t := nodeid(tObj)
+
+ // We model startTimer as if it were defined thus:
+ // func startTimer(t *runtimeTimer) { t.f(t.arg) }
+
+ // We hard-code the field offsets of time.runtimeTimer:
+ // type runtimeTimer struct {
+ // 0 __identity__
+ // 1 i int32
+ // 2 when int64
+ // 3 period int64
+ // 4 f func(int64, interface{})
+ // 5 arg interface{}
+ // }
+ f := t + 4
+ arg := t + 5
+
+ // store t.arg to t.f.params[0]
+ // (offset 1 => skip identity)
+ a.store(f, arg, 1, 1)
+
+ // Add dynamic call target.
+ if a.onlineCopy(c.targets, f) {
+ a.addWork(c.targets)
+ }
+ }
+}
+
+func ext۰time۰startTimer(a *analysis, cgn *cgnode) {
+ // This is the shared contour, used for dynamic calls.
+ targets := a.addOneNode(tInvalid, "startTimer.targets", nil)
+ cgn.sites = append(cgn.sites, &callsite{targets: targets})
+ params := a.funcParams(cgn.obj)
+ a.addConstraint(&timeStartTimerConstraint{
+ targets: targets,
+ t: params,
+ })
+}
diff --git a/llgo/third_party/go.tools/go/pointer/labels.go b/llgo/third_party/go.tools/go/pointer/labels.go
new file mode 100644
index 0000000000000000000000000000000000000000..aaa8397cf55f507925fc8f691645f3c99475a257
--- /dev/null
+++ b/llgo/third_party/go.tools/go/pointer/labels.go
@@ -0,0 +1,152 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pointer
+
+import (
+ "fmt"
+ "go/token"
+ "strings"
+
+ "llvm.org/llgo/third_party/go.tools/go/ssa"
+ "llvm.org/llgo/third_party/go.tools/go/types"
+)
+
+// A Label is an entity that may be pointed to by a pointer, map,
+// channel, 'func', slice or interface.
+//
+// Labels include:
+// - functions
+// - globals
+// - tagged objects, representing interfaces and reflect.Values
+// - arrays created by conversions (e.g. []byte("foo"), []byte(s))
+// - stack- and heap-allocated variables (including composite literals)
+// - channels, maps and arrays created by make()
+// - intrinsic or reflective operations that allocate (e.g. append, reflect.New)
+// - intrinsic objects, e.g. the initial array behind os.Args.
+// - and their subelements, e.g. "alloc.y[*].z"
+//
+// Labels are so varied that they defy good generalizations;
+// some have no value, no callgraph node, or no position.
+// Many objects have types that are inexpressible in Go:
+// maps, channels, functions, tagged objects.
+//
+// At most one of Value() or ReflectType() may return non-nil.
+//
+type Label struct {
+ obj *object // the addressable memory location containing this label
+ subelement *fieldInfo // subelement path within obj, e.g. ".a.b[*].c"
+}
+
+// Value returns the ssa.Value that allocated this label's object, if any.
+func (l Label) Value() ssa.Value {
+ val, _ := l.obj.data.(ssa.Value)
+ return val
+}
+
+ // ReflectType returns the type represented by this label if it is a
+// reflect.rtype instance object or *reflect.rtype-tagged object.
+//
+func (l Label) ReflectType() types.Type {
+ rtype, _ := l.obj.data.(types.Type)
+ return rtype
+}
+
+// Path returns the path to the subelement of the object containing
+// this label. For example, ".x[*].y".
+//
+func (l Label) Path() string {
+ return l.subelement.path()
+}
+
+// Pos returns the position of this label, if known, zero otherwise.
+func (l Label) Pos() token.Pos {
+ switch data := l.obj.data.(type) {
+ case ssa.Value:
+ return data.Pos()
+ case types.Type:
+ if nt, ok := deref(data).(*types.Named); ok {
+ return nt.Obj().Pos()
+ }
+ }
+ if cgn := l.obj.cgn; cgn != nil {
+ return cgn.fn.Pos()
+ }
+ return token.NoPos
+}
+
+// String returns the printed form of this label.
+//
+// Examples: Object type:
+// x (a variable)
+// (sync.Mutex).Lock (a function)
+// convert (array created by conversion)
+// makemap (map allocated via make)
+// makechan (channel allocated via make)
+// makeinterface (tagged object allocated by makeinterface)
+ // <alloc in reflect.Zero> (allocation in intrinsic)
+// sync.Mutex (a reflect.rtype instance)
+ // <command-line args> (an intrinsic object)
+//
+// Labels within compound objects have subelement paths:
+// x.y[*].z (a struct variable, x)
+// append.y[*].z (array allocated by append)
+// makeslice.y[*].z (array allocated via make)
+//
+// TODO(adonovan): expose func LabelString(*types.Package, Label).
+//
+func (l Label) String() string {
+ var s string
+ switch v := l.obj.data.(type) {
+ case types.Type:
+ return v.String()
+
+ case string:
+ s = v // an intrinsic object (e.g. os.Args[*])
+
+ case nil:
+ if l.obj.cgn != nil {
+ // allocation by intrinsic or reflective operation
+ s = fmt.Sprintf("<alloc in %s>", l.obj.cgn.fn)
+ } else {
+ s = "" // should be unreachable
+ }
+
+ case *ssa.Function:
+ s = v.String()
+
+ case *ssa.Global:
+ s = v.String()
+
+ case *ssa.Const:
+ s = v.Name()
+
+ case *ssa.Alloc:
+ s = v.Comment
+ if s == "" {
+ s = "alloc"
+ }
+
+ case *ssa.Call:
+ // Currently only calls to append can allocate objects.
+ if v.Call.Value.(*ssa.Builtin).Object().Name() != "append" {
+ panic("unhandled *ssa.Call label: " + v.Name())
+ }
+ s = "append"
+
+ case *ssa.MakeMap, *ssa.MakeChan, *ssa.MakeSlice, *ssa.Convert:
+ s = strings.ToLower(strings.TrimPrefix(fmt.Sprintf("%T", v), "*ssa."))
+
+ case *ssa.MakeInterface:
+ // MakeInterface is usually implicit in Go source (so
+ // Pos()==0), and tagged objects may be allocated
+ // synthetically (so no *MakeInterface data).
+ s = "makeinterface:" + v.X.Type().String()
+
+ default:
+ panic(fmt.Sprintf("unhandled object data type: %T", v))
+ }
+
+ return s + l.subelement.path()
+}
diff --git a/llgo/third_party/go.tools/go/pointer/opt.go b/llgo/third_party/go.tools/go/pointer/opt.go
new file mode 100644
index 0000000000000000000000000000000000000000..2620cc0d8f9dbef1be95e70681dfd58289352a42
--- /dev/null
+++ b/llgo/third_party/go.tools/go/pointer/opt.go
@@ -0,0 +1,125 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pointer
+
+// This file implements renumbering, a pre-solver optimization to
+// improve the efficiency of the solver's points-to set representation.
+//
+// TODO(adonovan): rename file "renumber.go"
+
+import "fmt"
+
+// renumber permutes a.nodes so that all nodes within an addressable
+// object appear before all non-addressable nodes, maintaining the
+// order of nodes within the same object (as required by offsetAddr).
+//
+// renumber must update every nodeid in the analysis (constraints,
+// Pointers, callgraph, etc) to reflect the new ordering.
+//
+// This is an optimisation to increase the locality and efficiency of
+// sparse representations of points-to sets. (Typically only about
+// 20% of nodes are within an object.)
+//
+// NB: nodes added during solving (e.g. for reflection, SetFinalizer)
+// will be appended to the end.
+//
+// Renumbering makes the PTA log inscrutable. To aid debugging, later
+// phases (e.g. HVN) must not rely on it having occurred.
+//
+func (a *analysis) renumber() {
+ if a.log != nil {
+ fmt.Fprintf(a.log, "\n\n==== Renumbering\n\n")
+ }
+
+ N := nodeid(len(a.nodes))
+ newNodes := make([]*node, N, N)
+ renumbering := make([]nodeid, N, N) // maps old to new
+
+ var i, j nodeid
+
+ // The zero node is special.
+ newNodes[j] = a.nodes[i]
+ renumbering[i] = j
+ i++
+ j++
+
+ // Pass 1: object nodes.
+ for i < N {
+ obj := a.nodes[i].obj
+ if obj == nil {
+ i++
+ continue
+ }
+
+ end := i + nodeid(obj.size)
+ for i < end {
+ newNodes[j] = a.nodes[i]
+ renumbering[i] = j
+ i++
+ j++
+ }
+ }
+ nobj := j
+
+ // Pass 2: non-object nodes.
+ for i = 1; i < N; {
+ obj := a.nodes[i].obj
+ if obj != nil {
+ i += nodeid(obj.size)
+ continue
+ }
+
+ newNodes[j] = a.nodes[i]
+ renumbering[i] = j
+ i++
+ j++
+ }
+
+ if j != N {
+ panic(fmt.Sprintf("internal error: j=%d, N=%d", j, N))
+ }
+
+ // Log the remapping table.
+ if a.log != nil {
+ fmt.Fprintf(a.log, "Renumbering nodes to improve density:\n")
+ fmt.Fprintf(a.log, "(%d object nodes of %d total)\n", nobj, N)
+ for old, new := range renumbering {
+ fmt.Fprintf(a.log, "\tn%d -> n%d\n", old, new)
+ }
+ }
+
+ // Now renumber all existing nodeids to use the new node permutation.
+ // It is critical that all reachable nodeids are accounted for!
+
+ // Renumber nodeids in queried Pointers.
+ for v, ptr := range a.result.Queries {
+ ptr.n = renumbering[ptr.n]
+ a.result.Queries[v] = ptr
+ }
+ for v, ptr := range a.result.IndirectQueries {
+ ptr.n = renumbering[ptr.n]
+ a.result.IndirectQueries[v] = ptr
+ }
+
+ // Renumber nodeids in global objects.
+ for v, id := range a.globalobj {
+ a.globalobj[v] = renumbering[id]
+ }
+
+ // Renumber nodeids in constraints.
+ for _, c := range a.constraints {
+ c.renumber(renumbering)
+ }
+
+ // Renumber nodeids in the call graph.
+ for _, cgn := range a.cgnodes {
+ cgn.obj = renumbering[cgn.obj]
+ for _, site := range cgn.sites {
+ site.targets = renumbering[site.targets]
+ }
+ }
+
+ a.nodes = newNodes
+}
diff --git a/llgo/third_party/go.tools/go/pointer/pointer_test.go b/llgo/third_party/go.tools/go/pointer/pointer_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..d64d762c83b400583340d21ff3f7d88e3545b199
--- /dev/null
+++ b/llgo/third_party/go.tools/go/pointer/pointer_test.go
@@ -0,0 +1,579 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pointer_test
+
+// This test uses 'expectation' comments embedded within testdata/*.go
+// files to specify the expected pointer analysis behaviour.
+// See below for grammar.
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "go/parser"
+ "go/token"
+ "io/ioutil"
+ "os"
+ "regexp"
+ "strconv"
+ "strings"
+ "testing"
+
+ "llvm.org/llgo/third_party/go.tools/go/callgraph"
+ "llvm.org/llgo/third_party/go.tools/go/loader"
+ "llvm.org/llgo/third_party/go.tools/go/pointer"
+ "llvm.org/llgo/third_party/go.tools/go/ssa"
+ "llvm.org/llgo/third_party/go.tools/go/ssa/ssautil"
+ "llvm.org/llgo/third_party/go.tools/go/types"
+ "llvm.org/llgo/third_party/go.tools/go/types/typeutil"
+)
+
+var inputs = []string{
+ "testdata/a_test.go",
+ "testdata/another.go",
+ "testdata/arrayreflect.go",
+ "testdata/arrays.go",
+ "testdata/channels.go",
+ "testdata/chanreflect.go",
+ "testdata/context.go",
+ "testdata/conv.go",
+ "testdata/finalizer.go",
+ "testdata/flow.go",
+ "testdata/fmtexcerpt.go",
+ "testdata/func.go",
+ "testdata/funcreflect.go",
+ "testdata/hello.go", // NB: causes spurious failure of HVN cross-check
+ "testdata/interfaces.go",
+ "testdata/issue9002.go",
+ "testdata/mapreflect.go",
+ "testdata/maps.go",
+ "testdata/panic.go",
+ "testdata/recur.go",
+ "testdata/reflect.go",
+ "testdata/rtti.go",
+ "testdata/structreflect.go",
+ "testdata/structs.go",
+ "testdata/timer.go",
+}
+
+// Expectation grammar:
+//
+// @calls f -> g
+//
+// A 'calls' expectation asserts that edge (f, g) appears in the
+// callgraph. f and g are notated as per Function.String(), which
+// may contain spaces (e.g. promoted method in anon struct).
+//
+// @pointsto a | b | c
+//
+// A 'pointsto' expectation asserts that the points-to set of its
+// operand contains exactly the set of labels {a,b,c} notated as per
+// labelString.
+//
+// A 'pointsto' expectation must appear on the same line as a
+// print(x) statement; the expectation's operand is x.
+//
+// If one of the strings is "...", the expectation asserts that the
+ // points-to set contains at least the other labels.
+//
+// We use '|' because label names may contain spaces, e.g. methods
+// of anonymous structs.
+//
+// From a theoretical perspective, concrete types in interfaces are
+// labels too, but they are represented differently and so have a
+// different expectation, @types, below.
+//
+// @types t | u | v
+//
+// A 'types' expectation asserts that the set of possible dynamic
+// types of its interface operand is exactly {t,u,v}, notated per
+// go/types.Type.String(). In other words, it asserts that the type
+// component of the interface may point to that set of concrete type
+// literals. It also works for reflect.Value, though the types
+// needn't be concrete in that case.
+//
+// A 'types' expectation must appear on the same line as a
+// print(x) statement; the expectation's operand is x.
+//
+// If one of the strings is "...", the expectation asserts that the
+// interface's type may point to at least the other types.
+//
+// We use '|' because type names may contain spaces.
+//
+// @warning "regexp"
+//
+// A 'warning' expectation asserts that the analysis issues a
+// warning that matches the regular expression within the string
+// literal.
+//
+// @line id
+//
+// A line directive associates the name "id" with the current
+// file:line. The string form of labels will use this id instead of
+// a file:line, making @pointsto expectations more robust against
+// perturbations in the source file.
+// (NB, anon functions still include line numbers.)
+//
+type expectation struct {
+ kind string // "pointsto" | "types" | "calls" | "warning"
+ filename string
+ linenum int // source line number, 1-based
+ args []string
+ types []types.Type // for types
+}
+
+func (e *expectation) String() string {
+ return fmt.Sprintf("@%s[%s]", e.kind, strings.Join(e.args, " | "))
+}
+
+func (e *expectation) errorf(format string, args ...interface{}) {
+ fmt.Printf("%s:%d: ", e.filename, e.linenum)
+ fmt.Printf(format, args...)
+ fmt.Println()
+}
+
+func (e *expectation) needsProbe() bool {
+ return e.kind == "pointsto" || e.kind == "types"
+}
+
+// Find probe (call to print(x)) of same source file/line as expectation.
+func findProbe(prog *ssa.Program, probes map[*ssa.CallCommon]bool, queries map[ssa.Value]pointer.Pointer, e *expectation) (site *ssa.CallCommon, pts pointer.PointsToSet) {
+ for call := range probes {
+ pos := prog.Fset.Position(call.Pos())
+ if pos.Line == e.linenum && pos.Filename == e.filename {
+ // TODO(adonovan): send this to test log (display only on failure).
+ // fmt.Printf("%s:%d: info: found probe for %s: %s\n",
+ // e.filename, e.linenum, e, p.arg0) // debugging
+ return call, queries[call.Args[0]].PointsTo()
+ }
+ }
+ return // e.g. analysis didn't reach this call
+}
+
+func doOneInput(input, filename string) bool {
+ conf := loader.Config{SourceImports: true}
+
+ // Parsing.
+ f, err := conf.ParseFile(filename, input)
+ if err != nil {
+ fmt.Println(err)
+ return false
+ }
+
+ // Create single-file main package and import its dependencies.
+ conf.CreateFromFiles("main", f)
+ iprog, err := conf.Load()
+ if err != nil {
+ fmt.Println(err)
+ return false
+ }
+ mainPkgInfo := iprog.Created[0].Pkg
+
+ // SSA creation + building.
+ prog := ssa.Create(iprog, ssa.SanityCheckFunctions)
+ prog.BuildAll()
+
+ mainpkg := prog.Package(mainPkgInfo)
+ ptrmain := mainpkg // main package for the pointer analysis
+ if mainpkg.Func("main") == nil {
+ // No main function; assume it's a test.
+ ptrmain = prog.CreateTestMainPackage(mainpkg)
+ }
+
+ // Find all calls to the built-in print(x). Analytically,
+ // print is a no-op, but it's a convenient hook for testing
+ // the PTS of an expression, so our tests use it.
+ probes := make(map[*ssa.CallCommon]bool)
+ for fn := range ssautil.AllFunctions(prog) {
+ if fn.Pkg == mainpkg {
+ for _, b := range fn.Blocks {
+ for _, instr := range b.Instrs {
+ if instr, ok := instr.(ssa.CallInstruction); ok {
+ if b, ok := instr.Common().Value.(*ssa.Builtin); ok && b.Name() == "print" {
+ probes[instr.Common()] = true
+ }
+ }
+ }
+ }
+ }
+ }
+
+ ok := true
+
+ lineMapping := make(map[string]string) // maps "file:line" to @line tag
+
+ // Parse expectations in this input.
+ var exps []*expectation
+ re := regexp.MustCompile("// *@([a-z]*) *(.*)$")
+ lines := strings.Split(input, "\n")
+ for linenum, line := range lines {
+ linenum++ // make it 1-based
+ if matches := re.FindAllStringSubmatch(line, -1); matches != nil {
+ match := matches[0]
+ kind, rest := match[1], match[2]
+ e := &expectation{kind: kind, filename: filename, linenum: linenum}
+
+ if kind == "line" {
+ if rest == "" {
+ ok = false
+ e.errorf("@%s expectation requires identifier", kind)
+ } else {
+ lineMapping[fmt.Sprintf("%s:%d", filename, linenum)] = rest
+ }
+ continue
+ }
+
+ if e.needsProbe() && !strings.Contains(line, "print(") {
+ ok = false
+ e.errorf("@%s expectation must follow call to print(x)", kind)
+ continue
+ }
+
+ switch kind {
+ case "pointsto":
+ e.args = split(rest, "|")
+
+ case "types":
+ for _, typstr := range split(rest, "|") {
+ var t types.Type = types.Typ[types.Invalid] // means "..."
+ if typstr != "..." {
+ texpr, err := parser.ParseExpr(typstr)
+ if err != nil {
+ ok = false
+ // Don't print err since its location is bad.
+ e.errorf("'%s' is not a valid type", typstr)
+ continue
+ }
+ mainFileScope := mainpkg.Object.Scope().Child(0)
+ t, _, err = types.EvalNode(prog.Fset, texpr, mainpkg.Object, mainFileScope)
+ if err != nil {
+ ok = false
+ // Don't print err since its location is bad.
+ e.errorf("'%s' is not a valid type: %s", typstr, err)
+ continue
+ }
+ }
+ e.types = append(e.types, t)
+ }
+
+ case "calls":
+ e.args = split(rest, "->")
+ // TODO(adonovan): eagerly reject the
+ // expectation if fn doesn't denote
+ // existing function, rather than fail
+ // the expectation after analysis.
+ if len(e.args) != 2 {
+ ok = false
+ e.errorf("@calls expectation wants 'caller -> callee' arguments")
+ continue
+ }
+
+ case "warning":
+ lit, err := strconv.Unquote(strings.TrimSpace(rest))
+ if err != nil {
+ ok = false
+ e.errorf("couldn't parse @warning operand: %s", err.Error())
+ continue
+ }
+ e.args = append(e.args, lit)
+
+ default:
+ ok = false
+ e.errorf("unknown expectation kind: %s", e)
+ continue
+ }
+ exps = append(exps, e)
+ }
+ }
+
+ var log bytes.Buffer
+ fmt.Fprintf(&log, "Input: %s\n", filename)
+
+ // Run the analysis.
+ config := &pointer.Config{
+ Reflection: true,
+ BuildCallGraph: true,
+ Mains: []*ssa.Package{ptrmain},
+ Log: &log,
+ }
+ for probe := range probes {
+ v := probe.Args[0]
+ if pointer.CanPoint(v.Type()) {
+ config.AddQuery(v)
+ }
+ }
+
+ // Print the log if there was an error or a panic.
+ complete := false
+ defer func() {
+ if !complete || !ok {
+ log.WriteTo(os.Stderr)
+ }
+ }()
+
+ result, err := pointer.Analyze(config)
+ if err != nil {
+ panic(err) // internal error in pointer analysis
+ }
+
+ // Check the expectations.
+ for _, e := range exps {
+ var call *ssa.CallCommon
+ var pts pointer.PointsToSet
+ var tProbe types.Type
+ if e.needsProbe() {
+ if call, pts = findProbe(prog, probes, result.Queries, e); call == nil {
+ ok = false
+ e.errorf("unreachable print() statement has expectation %s", e)
+ continue
+ }
+ tProbe = call.Args[0].Type()
+ if !pointer.CanPoint(tProbe) {
+ ok = false
+ e.errorf("expectation on non-pointerlike operand: %s", tProbe)
+ continue
+ }
+ }
+
+ switch e.kind {
+ case "pointsto":
+ if !checkPointsToExpectation(e, pts, lineMapping, prog) {
+ ok = false
+ }
+
+ case "types":
+ if !checkTypesExpectation(e, pts, tProbe) {
+ ok = false
+ }
+
+ case "calls":
+ if !checkCallsExpectation(prog, e, result.CallGraph) {
+ ok = false
+ }
+
+ case "warning":
+ if !checkWarningExpectation(prog, e, result.Warnings) {
+ ok = false
+ }
+ }
+ }
+
+ complete = true
+
+ // ok = false // debugging: uncomment to always see log
+
+ return ok
+}
+
+func labelString(l *pointer.Label, lineMapping map[string]string, prog *ssa.Program) string {
+ // Functions and Globals need no pos suffix,
+ // nor do allocations in intrinsic operations
+ // (for which we'll print the function name).
+ switch l.Value().(type) {
+ case nil, *ssa.Function, *ssa.Global:
+ return l.String()
+ }
+
+ str := l.String()
+ if pos := l.Pos(); pos != token.NoPos {
+ // Append the position, using a @line tag instead of a line number, if defined.
+ posn := prog.Fset.Position(pos)
+ s := fmt.Sprintf("%s:%d", posn.Filename, posn.Line)
+ if tag, ok := lineMapping[s]; ok {
+ return fmt.Sprintf("%s@%s:%d", str, tag, posn.Column)
+ }
+ str = fmt.Sprintf("%s@%s", str, posn)
+ }
+ return str
+}
+
+func checkPointsToExpectation(e *expectation, pts pointer.PointsToSet, lineMapping map[string]string, prog *ssa.Program) bool {
+ expected := make(map[string]int)
+ surplus := make(map[string]int)
+ exact := true
+ for _, g := range e.args {
+ if g == "..." {
+ exact = false
+ continue
+ }
+ expected[g]++
+ }
+ // Find the set of labels that the probe's
+ // argument (x in print(x)) may point to.
+ for _, label := range pts.Labels() {
+ name := labelString(label, lineMapping, prog)
+ if expected[name] > 0 {
+ expected[name]--
+ } else if exact {
+ surplus[name]++
+ }
+ }
+ // Report multiset difference:
+ ok := true
+ for _, count := range expected {
+ if count > 0 {
+ ok = false
+ e.errorf("value does not alias these expected labels: %s", join(expected))
+ break
+ }
+ }
+ for _, count := range surplus {
+ if count > 0 {
+ ok = false
+ e.errorf("value may additionally alias these labels: %s", join(surplus))
+ break
+ }
+ }
+ return ok
+}
+
+func checkTypesExpectation(e *expectation, pts pointer.PointsToSet, typ types.Type) bool {
+ var expected typeutil.Map
+ var surplus typeutil.Map
+ exact := true
+ for _, g := range e.types {
+ if g == types.Typ[types.Invalid] {
+ exact = false
+ continue
+ }
+ expected.Set(g, struct{}{})
+ }
+
+ if !pointer.CanHaveDynamicTypes(typ) {
+ e.errorf("@types expectation requires an interface- or reflect.Value-typed operand, got %s", typ)
+ return false
+ }
+
+ // Find the set of types that the probe's
+ // argument (x in print(x)) may contain.
+ for _, T := range pts.DynamicTypes().Keys() {
+ if expected.At(T) != nil {
+ expected.Delete(T)
+ } else if exact {
+ surplus.Set(T, struct{}{})
+ }
+ }
+ // Report set difference:
+ ok := true
+ if expected.Len() > 0 {
+ ok = false
+ e.errorf("interface cannot contain these types: %s", expected.KeysString())
+ }
+ if surplus.Len() > 0 {
+ ok = false
+ e.errorf("interface may additionally contain these types: %s", surplus.KeysString())
+ }
+ return ok
+}
+
+var errOK = errors.New("OK")
+
+func checkCallsExpectation(prog *ssa.Program, e *expectation, cg *callgraph.Graph) bool {
+ found := make(map[string]int)
+ err := callgraph.GraphVisitEdges(cg, func(edge *callgraph.Edge) error {
+ // Name-based matching is inefficient but it allows us to
+ // match functions whose names would not appear in an
+ // index ("<root>") or which are not unique ("func@1.2").
+ if edge.Caller.Func.String() == e.args[0] {
+ calleeStr := edge.Callee.Func.String()
+ if calleeStr == e.args[1] {
+ return errOK // expectation satisfied; stop the search
+ }
+ found[calleeStr]++
+ }
+ return nil
+ })
+ if err == errOK {
+ return true
+ }
+ if len(found) == 0 {
+ e.errorf("didn't find any calls from %s", e.args[0])
+ }
+ e.errorf("found no call from %s to %s, but only to %s",
+ e.args[0], e.args[1], join(found))
+ return false
+}
+
+func checkWarningExpectation(prog *ssa.Program, e *expectation, warnings []pointer.Warning) bool {
+ // TODO(adonovan): check the position part of the warning too?
+ re, err := regexp.Compile(e.args[0])
+ if err != nil {
+ e.errorf("invalid regular expression in @warning expectation: %s", err.Error())
+ return false
+ }
+
+ if len(warnings) == 0 {
+ e.errorf("@warning %s expectation, but no warnings", strconv.Quote(e.args[0]))
+ return false
+ }
+
+ for _, w := range warnings {
+ if re.MatchString(w.Message) {
+ return true
+ }
+ }
+
+ e.errorf("@warning %s expectation not satised; found these warnings though:", strconv.Quote(e.args[0]))
+ for _, w := range warnings {
+ fmt.Printf("%s: warning: %s\n", prog.Fset.Position(w.Pos), w.Message)
+ }
+ return false
+}
+
+func TestInput(t *testing.T) {
+ ok := true
+
+ wd, err := os.Getwd()
+ if err != nil {
+ t.Errorf("os.Getwd: %s", err)
+ return
+ }
+
+ // 'go test' does a chdir so that relative paths in
+ // diagnostics no longer make sense relative to the invoking
+ // shell's cwd. We print a special marker so that Emacs can
+ // make sense of them.
+ fmt.Fprintf(os.Stderr, "Entering directory `%s'\n", wd)
+
+ for _, filename := range inputs {
+ content, err := ioutil.ReadFile(filename)
+ if err != nil {
+ t.Errorf("couldn't read file '%s': %s", filename, err)
+ continue
+ }
+
+ if !doOneInput(string(content), filename) {
+ ok = false
+ }
+ }
+ if !ok {
+ t.Fail()
+ }
+}
+
+// join joins the elements of multiset with " | "s.
+func join(set map[string]int) string {
+ var buf bytes.Buffer
+ sep := ""
+ for name, count := range set {
+ for i := 0; i < count; i++ {
+ buf.WriteString(sep)
+ sep = " | "
+ buf.WriteString(name)
+ }
+ }
+ return buf.String()
+}
+
+// split returns the list of sep-delimited non-empty strings in s.
+func split(s, sep string) (r []string) {
+ for _, elem := range strings.Split(s, sep) {
+ elem = strings.TrimSpace(elem)
+ if elem != "" {
+ r = append(r, elem)
+ }
+ }
+ return
+}
diff --git a/llgo/third_party/go.tools/go/pointer/print.go b/llgo/third_party/go.tools/go/pointer/print.go
new file mode 100644
index 0000000000000000000000000000000000000000..4f2f4c7ae1266255c978188746ae852e51cc8e1a
--- /dev/null
+++ b/llgo/third_party/go.tools/go/pointer/print.go
@@ -0,0 +1,43 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pointer
+
+import "fmt"
+
+func (c *addrConstraint) String() string {
+ return fmt.Sprintf("addr n%d <- {&n%d}", c.dst, c.src)
+}
+
+func (c *copyConstraint) String() string {
+ return fmt.Sprintf("copy n%d <- n%d", c.dst, c.src)
+}
+
+func (c *loadConstraint) String() string {
+ return fmt.Sprintf("load n%d <- n%d[%d]", c.dst, c.src, c.offset)
+}
+
+func (c *storeConstraint) String() string {
+ return fmt.Sprintf("store n%d[%d] <- n%d", c.dst, c.offset, c.src)
+}
+
+func (c *offsetAddrConstraint) String() string {
+ return fmt.Sprintf("offsetAddr n%d <- n%d.#%d", c.dst, c.src, c.offset)
+}
+
+func (c *typeFilterConstraint) String() string {
+ return fmt.Sprintf("typeFilter n%d <- n%d.(%s)", c.dst, c.src, c.typ)
+}
+
+func (c *untagConstraint) String() string {
+ return fmt.Sprintf("untag n%d <- n%d.(%s)", c.dst, c.src, c.typ)
+}
+
+func (c *invokeConstraint) String() string {
+ return fmt.Sprintf("invoke n%d.%s(n%d ...)", c.iface, c.method.Name(), c.params)
+}
+
+func (n nodeid) String() string {
+ return fmt.Sprintf("n%d", n)
+}
diff --git a/llgo/third_party/go.tools/go/pointer/reflect.go b/llgo/third_party/go.tools/go/pointer/reflect.go
new file mode 100644
index 0000000000000000000000000000000000000000..8a07575197d227c63a051b45baa21fe2f5345a4e
--- /dev/null
+++ b/llgo/third_party/go.tools/go/pointer/reflect.go
@@ -0,0 +1,1971 @@
+package pointer
+
+// This file implements the generation and resolution rules for
+// constraints arising from the use of reflection in the target
+// program. See doc.go for explanation of the representation.
+//
+// For consistency, the names of all parameters match those of the
+// actual functions in the "reflect" package.
+//
+// To avoid proliferation of equivalent labels, intrinsics should
+// memoize as much as possible, like TypeOf and Zero do for their
+// tagged objects.
+//
+// TODO(adonovan): this file is rather subtle. Explain how we derive
+// the implementation of each reflect operator from its spec,
+// including the subtleties of reflect.flag{Addr,RO,Indir}.
+// [Hint: our implementation is as if reflect.flagIndir was always
+// true, i.e. reflect.Values are pointers to tagged objects, there is
+// no inline allocation optimization; and indirect tagged objects (not
+// yet implemented) correspond to reflect.Values with
+// reflect.flagAddr.]
+// A picture would help too.
+//
+// TODO(adonovan): try factoring up the common parts of the majority of
+// these constraints that are single input, single output.
+
+import (
+ "fmt"
+ "reflect"
+
+ "llvm.org/llgo/third_party/go.tools/go/exact"
+ "llvm.org/llgo/third_party/go.tools/go/ssa"
+ "llvm.org/llgo/third_party/go.tools/go/types"
+)
+
+func init() {
+ for name, fn := range map[string]intrinsic{
+ // reflect.Value methods.
+ "(reflect.Value).Addr": ext۰reflect۰Value۰Addr,
+ "(reflect.Value).Bool": ext۰NoEffect,
+ "(reflect.Value).Bytes": ext۰reflect۰Value۰Bytes,
+ "(reflect.Value).Call": ext۰reflect۰Value۰Call,
+ "(reflect.Value).CallSlice": ext۰reflect۰Value۰CallSlice,
+ "(reflect.Value).CanAddr": ext۰NoEffect,
+ "(reflect.Value).CanInterface": ext۰NoEffect,
+ "(reflect.Value).CanSet": ext۰NoEffect,
+ "(reflect.Value).Cap": ext۰NoEffect,
+ "(reflect.Value).Close": ext۰NoEffect,
+ "(reflect.Value).Complex": ext۰NoEffect,
+ "(reflect.Value).Convert": ext۰reflect۰Value۰Convert,
+ "(reflect.Value).Elem": ext۰reflect۰Value۰Elem,
+ "(reflect.Value).Field": ext۰reflect۰Value۰Field,
+ "(reflect.Value).FieldByIndex": ext۰reflect۰Value۰FieldByIndex,
+ "(reflect.Value).FieldByName": ext۰reflect۰Value۰FieldByName,
+ "(reflect.Value).FieldByNameFunc": ext۰reflect۰Value۰FieldByNameFunc,
+ "(reflect.Value).Float": ext۰NoEffect,
+ "(reflect.Value).Index": ext۰reflect۰Value۰Index,
+ "(reflect.Value).Int": ext۰NoEffect,
+ "(reflect.Value).Interface": ext۰reflect۰Value۰Interface,
+ "(reflect.Value).InterfaceData": ext۰NoEffect,
+ "(reflect.Value).IsNil": ext۰NoEffect,
+ "(reflect.Value).IsValid": ext۰NoEffect,
+ "(reflect.Value).Kind": ext۰NoEffect,
+ "(reflect.Value).Len": ext۰NoEffect,
+ "(reflect.Value).MapIndex": ext۰reflect۰Value۰MapIndex,
+ "(reflect.Value).MapKeys": ext۰reflect۰Value۰MapKeys,
+ "(reflect.Value).Method": ext۰reflect۰Value۰Method,
+ "(reflect.Value).MethodByName": ext۰reflect۰Value۰MethodByName,
+ "(reflect.Value).NumField": ext۰NoEffect,
+ "(reflect.Value).NumMethod": ext۰NoEffect,
+ "(reflect.Value).OverflowComplex": ext۰NoEffect,
+ "(reflect.Value).OverflowFloat": ext۰NoEffect,
+ "(reflect.Value).OverflowInt": ext۰NoEffect,
+ "(reflect.Value).OverflowUint": ext۰NoEffect,
+ "(reflect.Value).Pointer": ext۰NoEffect,
+ "(reflect.Value).Recv": ext۰reflect۰Value۰Recv,
+ "(reflect.Value).Send": ext۰reflect۰Value۰Send,
+ "(reflect.Value).Set": ext۰reflect۰Value۰Set,
+ "(reflect.Value).SetBool": ext۰NoEffect,
+ "(reflect.Value).SetBytes": ext۰reflect۰Value۰SetBytes,
+ "(reflect.Value).SetComplex": ext۰NoEffect,
+ "(reflect.Value).SetFloat": ext۰NoEffect,
+ "(reflect.Value).SetInt": ext۰NoEffect,
+ "(reflect.Value).SetLen": ext۰NoEffect,
+ "(reflect.Value).SetMapIndex": ext۰reflect۰Value۰SetMapIndex,
+ "(reflect.Value).SetPointer": ext۰reflect۰Value۰SetPointer,
+ "(reflect.Value).SetString": ext۰NoEffect,
+ "(reflect.Value).SetUint": ext۰NoEffect,
+ "(reflect.Value).Slice": ext۰reflect۰Value۰Slice,
+ "(reflect.Value).String": ext۰NoEffect,
+ "(reflect.Value).TryRecv": ext۰reflect۰Value۰Recv,
+ "(reflect.Value).TrySend": ext۰reflect۰Value۰Send,
+ "(reflect.Value).Type": ext۰NoEffect,
+ "(reflect.Value).Uint": ext۰NoEffect,
+ "(reflect.Value).UnsafeAddr": ext۰NoEffect,
+
+ // Standalone reflect.* functions.
+ "reflect.Append": ext۰reflect۰Append,
+ "reflect.AppendSlice": ext۰reflect۰AppendSlice,
+ "reflect.Copy": ext۰reflect۰Copy,
+ "reflect.ChanOf": ext۰reflect۰ChanOf,
+ "reflect.DeepEqual": ext۰NoEffect,
+ "reflect.Indirect": ext۰reflect۰Indirect,
+ "reflect.MakeChan": ext۰reflect۰MakeChan,
+ "reflect.MakeFunc": ext۰reflect۰MakeFunc,
+ "reflect.MakeMap": ext۰reflect۰MakeMap,
+ "reflect.MakeSlice": ext۰reflect۰MakeSlice,
+ "reflect.MapOf": ext۰reflect۰MapOf,
+ "reflect.New": ext۰reflect۰New,
+ "reflect.NewAt": ext۰reflect۰NewAt,
+ "reflect.PtrTo": ext۰reflect۰PtrTo,
+ "reflect.Select": ext۰reflect۰Select,
+ "reflect.SliceOf": ext۰reflect۰SliceOf,
+ "reflect.TypeOf": ext۰reflect۰TypeOf,
+ "reflect.ValueOf": ext۰reflect۰ValueOf,
+ "reflect.Zero": ext۰reflect۰Zero,
+ "reflect.init": ext۰NoEffect,
+
+ // *reflect.rtype methods
+ "(*reflect.rtype).Align": ext۰NoEffect,
+ "(*reflect.rtype).AssignableTo": ext۰NoEffect,
+ "(*reflect.rtype).Bits": ext۰NoEffect,
+ "(*reflect.rtype).ChanDir": ext۰NoEffect,
+ "(*reflect.rtype).ConvertibleTo": ext۰NoEffect,
+ "(*reflect.rtype).Elem": ext۰reflect۰rtype۰Elem,
+ "(*reflect.rtype).Field": ext۰reflect۰rtype۰Field,
+ "(*reflect.rtype).FieldAlign": ext۰NoEffect,
+ "(*reflect.rtype).FieldByIndex": ext۰reflect۰rtype۰FieldByIndex,
+ "(*reflect.rtype).FieldByName": ext۰reflect۰rtype۰FieldByName,
+ "(*reflect.rtype).FieldByNameFunc": ext۰reflect۰rtype۰FieldByNameFunc,
+ "(*reflect.rtype).Implements": ext۰NoEffect,
+ "(*reflect.rtype).In": ext۰reflect۰rtype۰In,
+ "(*reflect.rtype).IsVariadic": ext۰NoEffect,
+ "(*reflect.rtype).Key": ext۰reflect۰rtype۰Key,
+ "(*reflect.rtype).Kind": ext۰NoEffect,
+ "(*reflect.rtype).Len": ext۰NoEffect,
+ "(*reflect.rtype).Method": ext۰reflect۰rtype۰Method,
+ "(*reflect.rtype).MethodByName": ext۰reflect۰rtype۰MethodByName,
+ "(*reflect.rtype).Name": ext۰NoEffect,
+ "(*reflect.rtype).NumField": ext۰NoEffect,
+ "(*reflect.rtype).NumIn": ext۰NoEffect,
+ "(*reflect.rtype).NumMethod": ext۰NoEffect,
+ "(*reflect.rtype).NumOut": ext۰NoEffect,
+ "(*reflect.rtype).Out": ext۰reflect۰rtype۰Out,
+ "(*reflect.rtype).PkgPath": ext۰NoEffect,
+ "(*reflect.rtype).Size": ext۰NoEffect,
+ "(*reflect.rtype).String": ext۰NoEffect,
+ } {
+ intrinsicsByName[name] = fn
+ }
+}
+
+// -------------------- (reflect.Value) --------------------
+
+func ext۰reflect۰Value۰Addr(a *analysis, cgn *cgnode) {} // TODO(adonovan)
+
+// ---------- func (Value).Bytes() Value ----------
+
+// result = v.Bytes()
+type rVBytesConstraint struct {
+ v nodeid // (ptr)
+ result nodeid // (indirect)
+}
+
+func (c *rVBytesConstraint) ptr() nodeid { return c.v }
+func (c *rVBytesConstraint) presolve(h *hvn) {
+ h.markIndirect(onodeid(c.result), "rVBytes.result")
+}
+func (c *rVBytesConstraint) renumber(mapping []nodeid) {
+ c.v = mapping[c.v]
+ c.result = mapping[c.result]
+}
+
+func (c *rVBytesConstraint) String() string {
+ return fmt.Sprintf("n%d = reflect n%d.Bytes()", c.result, c.v)
+}
+
+func (c *rVBytesConstraint) solve(a *analysis, delta *nodeset) {
+ changed := false
+ for _, x := range delta.AppendTo(a.deltaSpace) {
+ vObj := nodeid(x)
+ tDyn, slice, indirect := a.taggedValue(vObj)
+ if indirect {
+ // TODO(adonovan): we'll need to implement this
+ // when we start creating indirect tagged objects.
+ panic("indirect tagged object")
+ }
+
+ tSlice, ok := tDyn.Underlying().(*types.Slice)
+ if ok && types.Identical(tSlice.Elem(), types.Typ[types.Uint8]) {
+ if a.onlineCopy(c.result, slice) {
+ changed = true
+ }
+ }
+ }
+ if changed {
+ a.addWork(c.result)
+ }
+}
+
+func ext۰reflect۰Value۰Bytes(a *analysis, cgn *cgnode) {
+ a.addConstraint(&rVBytesConstraint{
+ v: a.funcParams(cgn.obj),
+ result: a.funcResults(cgn.obj),
+ })
+}
+
+// ---------- func (Value).Call(in []Value) []Value ----------
+
+// result = v.Call(in)
+type rVCallConstraint struct {
+ cgn *cgnode
+ targets nodeid // (indirect)
+ v nodeid // (ptr)
+ arg nodeid // = in[*]
+ result nodeid // (indirect)
+ dotdotdot bool // interpret last arg as a "..." slice
+}
+
+func (c *rVCallConstraint) ptr() nodeid { return c.v }
+func (c *rVCallConstraint) presolve(h *hvn) {
+ h.markIndirect(onodeid(c.targets), "rVCall.targets")
+ h.markIndirect(onodeid(c.result), "rVCall.result")
+}
+func (c *rVCallConstraint) renumber(mapping []nodeid) {
+ c.targets = mapping[c.targets]
+ c.v = mapping[c.v]
+ c.arg = mapping[c.arg]
+ c.result = mapping[c.result]
+}
+
+func (c *rVCallConstraint) String() string {
+ return fmt.Sprintf("n%d = reflect n%d.Call(n%d)", c.result, c.v, c.arg)
+}
+
+func (c *rVCallConstraint) solve(a *analysis, delta *nodeset) {
+ if c.targets == 0 {
+ panic("no targets")
+ }
+
+ changed := false
+ for _, x := range delta.AppendTo(a.deltaSpace) {
+ vObj := nodeid(x)
+ tDyn, fn, indirect := a.taggedValue(vObj)
+ if indirect {
+ // TODO(adonovan): we'll need to implement this
+ // when we start creating indirect tagged objects.
+ panic("indirect tagged object")
+ }
+
+ tSig, ok := tDyn.Underlying().(*types.Signature)
+ if !ok {
+ continue // not a function
+ }
+ if tSig.Recv() != nil {
+ panic(tSig) // TODO(adonovan): rethink when we implement Method()
+ }
+
+ // Add dynamic call target.
+ if a.onlineCopy(c.targets, fn) {
+ a.addWork(c.targets)
+ // TODO(adonovan): is 'else continue' a sound optimisation here?
+ }
+
+ // Allocate a P/R block.
+ tParams := tSig.Params()
+ tResults := tSig.Results()
+ params := a.addNodes(tParams, "rVCall.params")
+ results := a.addNodes(tResults, "rVCall.results")
+
+ // Make a dynamic call to 'fn'.
+ a.store(fn, params, 1, a.sizeof(tParams))
+ a.load(results, fn, 1+a.sizeof(tParams), a.sizeof(tResults))
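+ // (Offset 1 skips each function object's identity node: formal
+ // parameters occupy fn+1 onwards and the results follow them, so the
+ // store fills the P-block and the load reads the R-block of every
+ // function in pts(fn).)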
+
+ // Populate P by type-asserting each actual arg (all merged in c.arg).
+ for i, n := 0, tParams.Len(); i < n; i++ {
+ T := tParams.At(i).Type()
+ a.typeAssert(T, params, c.arg, false)
+ params += nodeid(a.sizeof(T))
+ }
+
+ // Use R by tagging and copying each actual result to c.result.
+ for i, n := 0, tResults.Len(); i < n; i++ {
+ T := tResults.At(i).Type()
+ // Convert from an arbitrary type to a reflect.Value
+ // (like MakeInterface followed by reflect.ValueOf).
+ if isInterface(T) {
+ // (don't tag)
+ if a.onlineCopy(c.result, results) {
+ changed = true
+ }
+ } else {
+ obj := a.makeTagged(T, c.cgn, nil)
+ a.onlineCopyN(obj+1, results, a.sizeof(T))
+ if a.addLabel(c.result, obj) { // (true)
+ changed = true
+ }
+ }
+ results += nodeid(a.sizeof(T))
+ }
+ }
+ if changed {
+ a.addWork(c.result)
+ }
+}
+
+// Common code for direct (inlined) and indirect calls to (reflect.Value).Call.
+func reflectCallImpl(a *analysis, cgn *cgnode, site *callsite, recv, arg nodeid, dotdotdot bool) nodeid {
+ // Allocate []reflect.Value array for the result.
+ ret := a.nextNode()
+ a.addNodes(types.NewArray(a.reflectValueObj.Type(), 1), "rVCall.ret")
+ a.endObject(ret, cgn, nil)
+
+ // pts(targets) will be the set of possible call targets.
+ site.targets = a.addOneNode(tInvalid, "rvCall.targets", nil)
+
+ // All arguments are merged since they arrive in a slice.
+ argelts := a.addOneNode(a.reflectValueObj.Type(), "rVCall.args", nil)
+ a.load(argelts, arg, 1, 1) // slice elements
+
+ a.addConstraint(&rVCallConstraint{
+ cgn: cgn,
+ targets: site.targets,
+ v: recv,
+ arg: argelts,
+ result: ret + 1, // results go into elements of ret
+ dotdotdot: dotdotdot,
+ })
+ return ret
+}
+
+func reflectCall(a *analysis, cgn *cgnode, dotdotdot bool) {
+ // This is the shared contour implementation of (reflect.Value).Call
+ // and CallSlice, as used by indirect calls (rare).
+ // Direct calls are inlined in gen.go, eliding the
+ // intermediate cgnode for Call.
+ site := new(callsite)
+ cgn.sites = append(cgn.sites, site)
+ recv := a.funcParams(cgn.obj)
+ arg := recv + 1
+ ret := reflectCallImpl(a, cgn, site, recv, arg, dotdotdot)
+ a.addressOf(cgn.fn.Signature.Results().At(0).Type(), a.funcResults(cgn.obj), ret)
+}
+
+func ext۰reflect۰Value۰Call(a *analysis, cgn *cgnode) {
+ reflectCall(a, cgn, false)
+}
+
+func ext۰reflect۰Value۰CallSlice(a *analysis, cgn *cgnode) {
+ // TODO(adonovan): implement. Also, inline direct calls in gen.go too.
+ if false {
+ reflectCall(a, cgn, true)
+ }
+}
+
+func ext۰reflect۰Value۰Convert(a *analysis, cgn *cgnode) {} // TODO(adonovan)
+
+// ---------- func (Value).Elem() Value ----------
+
+// result = v.Elem()
+type rVElemConstraint struct {
+ cgn *cgnode
+ v nodeid // (ptr)
+ result nodeid // (indirect)
+}
+
+func (c *rVElemConstraint) ptr() nodeid { return c.v }
+func (c *rVElemConstraint) presolve(h *hvn) {
+ h.markIndirect(onodeid(c.result), "rVElem.result")
+}
+func (c *rVElemConstraint) renumber(mapping []nodeid) {
+ c.v = mapping[c.v]
+ c.result = mapping[c.result]
+}
+
+func (c *rVElemConstraint) String() string {
+ return fmt.Sprintf("n%d = reflect n%d.Elem()", c.result, c.v)
+}
+
+func (c *rVElemConstraint) solve(a *analysis, delta *nodeset) {
+ changed := false
+ for _, x := range delta.AppendTo(a.deltaSpace) {
+ vObj := nodeid(x)
+ tDyn, payload, indirect := a.taggedValue(vObj)
+ if indirect {
+ // TODO(adonovan): we'll need to implement this
+ // when we start creating indirect tagged objects.
+ panic("indirect tagged object")
+ }
+
+ switch t := tDyn.Underlying().(type) {
+ case *types.Interface:
+ if a.onlineCopy(c.result, payload) {
+ changed = true
+ }
+
+ case *types.Pointer:
+ obj := a.makeTagged(t.Elem(), c.cgn, nil)
+ a.load(obj+1, payload, 0, a.sizeof(t.Elem()))
+ if a.addLabel(c.result, obj) {
+ changed = true
+ }
+ }
+ }
+ if changed {
+ a.addWork(c.result)
+ }
+}
+
+func ext۰reflect۰Value۰Elem(a *analysis, cgn *cgnode) {
+ a.addConstraint(&rVElemConstraint{
+ cgn: cgn,
+ v: a.funcParams(cgn.obj),
+ result: a.funcResults(cgn.obj),
+ })
+}
+
+func ext۰reflect۰Value۰Field(a *analysis, cgn *cgnode) {} // TODO(adonovan)
+func ext۰reflect۰Value۰FieldByIndex(a *analysis, cgn *cgnode) {} // TODO(adonovan)
+func ext۰reflect۰Value۰FieldByName(a *analysis, cgn *cgnode) {} // TODO(adonovan)
+func ext۰reflect۰Value۰FieldByNameFunc(a *analysis, cgn *cgnode) {} // TODO(adonovan)
+
+// ---------- func (Value).Index() Value ----------
+
+// result = v.Index()
+type rVIndexConstraint struct {
+ cgn *cgnode
+ v nodeid // (ptr)
+ result nodeid // (indirect)
+}
+
+func (c *rVIndexConstraint) ptr() nodeid { return c.v }
+func (c *rVIndexConstraint) presolve(h *hvn) {
+ h.markIndirect(onodeid(c.result), "rVIndex.result")
+}
+func (c *rVIndexConstraint) renumber(mapping []nodeid) {
+ c.v = mapping[c.v]
+ c.result = mapping[c.result]
+}
+
+func (c *rVIndexConstraint) String() string {
+ return fmt.Sprintf("n%d = reflect n%d.Index()", c.result, c.v)
+}
+
+func (c *rVIndexConstraint) solve(a *analysis, delta *nodeset) {
+ changed := false
+ for _, x := range delta.AppendTo(a.deltaSpace) {
+ vObj := nodeid(x)
+ tDyn, payload, indirect := a.taggedValue(vObj)
+ if indirect {
+ // TODO(adonovan): we'll need to implement this
+ // when we start creating indirect tagged objects.
+ panic("indirect tagged object")
+ }
+
+ var res nodeid
+ switch t := tDyn.Underlying().(type) {
+ case *types.Array:
+ res = a.makeTagged(t.Elem(), c.cgn, nil)
+ a.onlineCopyN(res+1, payload+1, a.sizeof(t.Elem()))
+
+ case *types.Slice:
+ res = a.makeTagged(t.Elem(), c.cgn, nil)
+ a.load(res+1, payload, 1, a.sizeof(t.Elem()))
+
+ case *types.Basic:
+ if t.Kind() == types.String {
+ res = a.makeTagged(types.Typ[types.Rune], c.cgn, nil)
+ }
+ }
+ if res != 0 && a.addLabel(c.result, res) {
+ changed = true
+ }
+ }
+ if changed {
+ a.addWork(c.result)
+ }
+}
+
+func ext۰reflect۰Value۰Index(a *analysis, cgn *cgnode) {
+ a.addConstraint(&rVIndexConstraint{
+ cgn: cgn,
+ v: a.funcParams(cgn.obj),
+ result: a.funcResults(cgn.obj),
+ })
+}
+
+// ---------- func (Value).Interface() Value ----------
+
+// result = v.Interface()
+type rVInterfaceConstraint struct {
+ v nodeid // (ptr)
+ result nodeid // (indirect)
+}
+
+func (c *rVInterfaceConstraint) ptr() nodeid { return c.v }
+func (c *rVInterfaceConstraint) presolve(h *hvn) {
+ h.markIndirect(onodeid(c.result), "rVInterface.result")
+}
+func (c *rVInterfaceConstraint) renumber(mapping []nodeid) {
+ c.v = mapping[c.v]
+ c.result = mapping[c.result]
+}
+
+func (c *rVInterfaceConstraint) String() string {
+ return fmt.Sprintf("n%d = reflect n%d.Interface()", c.result, c.v)
+}
+
+func (c *rVInterfaceConstraint) solve(a *analysis, delta *nodeset) {
+ changed := false
+ for _, x := range delta.AppendTo(a.deltaSpace) {
+ vObj := nodeid(x)
+ tDyn, payload, indirect := a.taggedValue(vObj)
+ if indirect {
+ // TODO(adonovan): we'll need to implement this
+ // when we start creating indirect tagged objects.
+ panic("indirect tagged object")
+ }
+
+ if isInterface(tDyn) {
+ if a.onlineCopy(c.result, payload) {
+ a.addWork(c.result)
+ }
+ } else {
+ if a.addLabel(c.result, vObj) {
+ changed = true
+ }
+ }
+ }
+ if changed {
+ a.addWork(c.result)
+ }
+}
+
+func ext۰reflect۰Value۰Interface(a *analysis, cgn *cgnode) {
+ a.addConstraint(&rVInterfaceConstraint{
+ v: a.funcParams(cgn.obj),
+ result: a.funcResults(cgn.obj),
+ })
+}
+
+// ---------- func (Value).MapIndex(Value) Value ----------
+
+// result = v.MapIndex(_)
+type rVMapIndexConstraint struct {
+ cgn *cgnode
+ v nodeid // (ptr)
+ result nodeid // (indirect)
+}
+
+func (c *rVMapIndexConstraint) ptr() nodeid { return c.v }
+func (c *rVMapIndexConstraint) presolve(h *hvn) {
+ h.markIndirect(onodeid(c.result), "rVMapIndex.result")
+}
+func (c *rVMapIndexConstraint) renumber(mapping []nodeid) {
+ c.v = mapping[c.v]
+ c.result = mapping[c.result]
+}
+
+func (c *rVMapIndexConstraint) String() string {
+ return fmt.Sprintf("n%d = reflect n%d.MapIndex(_)", c.result, c.v)
+}
+
+func (c *rVMapIndexConstraint) solve(a *analysis, delta *nodeset) {
+ changed := false
+ for _, x := range delta.AppendTo(a.deltaSpace) {
+ vObj := nodeid(x)
+ tDyn, m, indirect := a.taggedValue(vObj)
+ tMap, _ := tDyn.Underlying().(*types.Map)
+ if tMap == nil {
+ continue // not a map
+ }
+ if indirect {
+ // TODO(adonovan): we'll need to implement this
+ // when we start creating indirect tagged objects.
+ panic("indirect tagged object")
+ }
+
+ obj := a.makeTagged(tMap.Elem(), c.cgn, nil)
+ a.load(obj+1, m, a.sizeof(tMap.Key()), a.sizeof(tMap.Elem()))
+ if a.addLabel(c.result, obj) {
+ changed = true
+ }
+ }
+ if changed {
+ a.addWork(c.result)
+ }
+}
+
+func ext۰reflect۰Value۰MapIndex(a *analysis, cgn *cgnode) {
+ a.addConstraint(&rVMapIndexConstraint{
+ cgn: cgn,
+ v: a.funcParams(cgn.obj),
+ result: a.funcResults(cgn.obj),
+ })
+}
+
+// ---------- func (Value).MapKeys() []Value ----------
+
+// result = v.MapKeys()
+type rVMapKeysConstraint struct {
+ cgn *cgnode
+ v nodeid // (ptr)
+ result nodeid // (indirect)
+}
+
+func (c *rVMapKeysConstraint) ptr() nodeid { return c.v }
+func (c *rVMapKeysConstraint) presolve(h *hvn) {
+ h.markIndirect(onodeid(c.result), "rVMapKeys.result")
+}
+func (c *rVMapKeysConstraint) renumber(mapping []nodeid) {
+ c.v = mapping[c.v]
+ c.result = mapping[c.result]
+}
+
+func (c *rVMapKeysConstraint) String() string {
+ return fmt.Sprintf("n%d = reflect n%d.MapKeys()", c.result, c.v)
+}
+
+func (c *rVMapKeysConstraint) solve(a *analysis, delta *nodeset) {
+ changed := false
+ for _, x := range delta.AppendTo(a.deltaSpace) {
+ vObj := nodeid(x)
+ tDyn, m, indirect := a.taggedValue(vObj)
+ tMap, _ := tDyn.Underlying().(*types.Map)
+ if tMap == nil {
+ continue // not a map
+ }
+ if indirect {
+ // TODO(adonovan): we'll need to implement this
+ // when we start creating indirect tagged objects.
+ panic("indirect tagged object")
+ }
+
+ kObj := a.makeTagged(tMap.Key(), c.cgn, nil)
+ a.load(kObj+1, m, 0, a.sizeof(tMap.Key()))
+ if a.addLabel(c.result, kObj) {
+ changed = true
+ }
+ }
+ if changed {
+ a.addWork(c.result)
+ }
+}
+
+func ext۰reflect۰Value۰MapKeys(a *analysis, cgn *cgnode) {
+ // Allocate an array for the result.
+ obj := a.nextNode()
+ T := types.NewSlice(a.reflectValueObj.Type())
+ a.addNodes(sliceToArray(T), "reflect.MapKeys result")
+ a.endObject(obj, cgn, nil)
+ a.addressOf(T, a.funcResults(cgn.obj), obj)
+
+ a.addConstraint(&rVMapKeysConstraint{
+ cgn: cgn,
+ v: a.funcParams(cgn.obj),
+ result: obj + 1, // result is stored in array elems
+ })
+}
+
+func ext۰reflect۰Value۰Method(a *analysis, cgn *cgnode) {} // TODO(adonovan)
+func ext۰reflect۰Value۰MethodByName(a *analysis, cgn *cgnode) {} // TODO(adonovan)
+
+// ---------- func (Value).Recv(Value) Value ----------
+
+// result, _ = v.Recv()
+type rVRecvConstraint struct {
+ cgn *cgnode
+ v nodeid // (ptr)
+ result nodeid // (indirect)
+}
+
+func (c *rVRecvConstraint) ptr() nodeid { return c.v }
+func (c *rVRecvConstraint) presolve(h *hvn) {
+ h.markIndirect(onodeid(c.result), "rVRecv.result")
+}
+func (c *rVRecvConstraint) renumber(mapping []nodeid) {
+ c.v = mapping[c.v]
+ c.result = mapping[c.result]
+}
+
+func (c *rVRecvConstraint) String() string {
+ return fmt.Sprintf("n%d = reflect n%d.Recv()", c.result, c.v)
+}
+
+func (c *rVRecvConstraint) solve(a *analysis, delta *nodeset) {
+ changed := false
+ for _, x := range delta.AppendTo(a.deltaSpace) {
+ vObj := nodeid(x)
+ tDyn, ch, indirect := a.taggedValue(vObj)
+ tChan, _ := tDyn.Underlying().(*types.Chan)
+ if tChan == nil {
+ continue // not a channel
+ }
+ if indirect {
+ // TODO(adonovan): we'll need to implement this
+ // when we start creating indirect tagged objects.
+ panic("indirect tagged object")
+ }
+
+ tElem := tChan.Elem()
+ elemObj := a.makeTagged(tElem, c.cgn, nil)
+ a.load(elemObj+1, ch, 0, a.sizeof(tElem))
+ if a.addLabel(c.result, elemObj) {
+ changed = true
+ }
+ }
+ if changed {
+ a.addWork(c.result)
+ }
+}
+
+func ext۰reflect۰Value۰Recv(a *analysis, cgn *cgnode) {
+ a.addConstraint(&rVRecvConstraint{
+ cgn: cgn,
+ v: a.funcParams(cgn.obj),
+ result: a.funcResults(cgn.obj),
+ })
+}
+
+// ---------- func (Value).Send(Value) ----------
+
+// v.Send(x)
+type rVSendConstraint struct {
+ cgn *cgnode
+ v nodeid // (ptr)
+ x nodeid
+}
+
+func (c *rVSendConstraint) ptr() nodeid { return c.v }
+func (c *rVSendConstraint) presolve(*hvn) {}
+func (c *rVSendConstraint) renumber(mapping []nodeid) {
+ c.v = mapping[c.v]
+ c.x = mapping[c.x]
+}
+
+func (c *rVSendConstraint) String() string {
+ return fmt.Sprintf("reflect n%d.Send(n%d)", c.v, c.x)
+}
+
+func (c *rVSendConstraint) solve(a *analysis, delta *nodeset) {
+ for _, x := range delta.AppendTo(a.deltaSpace) {
+ vObj := nodeid(x)
+ tDyn, ch, indirect := a.taggedValue(vObj)
+ tChan, _ := tDyn.Underlying().(*types.Chan)
+ if tChan == nil {
+ continue // not a channel
+ }
+ if indirect {
+ // TODO(adonovan): we'll need to implement this
+ // when we start creating indirect tagged objects.
+ panic("indirect tagged object")
+ }
+
+ // Extract x's payload to xtmp, then store to channel.
+ tElem := tChan.Elem()
+ xtmp := a.addNodes(tElem, "Send.xtmp")
+ a.typeAssert(tElem, xtmp, c.x, false)
+ a.store(ch, xtmp, 0, a.sizeof(tElem))
+ }
+}
+
+func ext۰reflect۰Value۰Send(a *analysis, cgn *cgnode) {
+ params := a.funcParams(cgn.obj)
+ a.addConstraint(&rVSendConstraint{
+ cgn: cgn,
+ v: params,
+ x: params + 1,
+ })
+}
+
+func ext۰reflect۰Value۰Set(a *analysis, cgn *cgnode) {} // TODO(adonovan)
+
+// ---------- func (Value).SetBytes(x []byte) ----------
+
+// v.SetBytes(x)
+type rVSetBytesConstraint struct {
+ cgn *cgnode
+ v nodeid // (ptr)
+ x nodeid
+}
+
+func (c *rVSetBytesConstraint) ptr() nodeid { return c.v }
+func (c *rVSetBytesConstraint) presolve(*hvn) {}
+func (c *rVSetBytesConstraint) renumber(mapping []nodeid) {
+ c.v = mapping[c.v]
+ c.x = mapping[c.x]
+}
+
+func (c *rVSetBytesConstraint) String() string {
+ return fmt.Sprintf("reflect n%d.SetBytes(n%d)", c.v, c.x)
+}
+
+func (c *rVSetBytesConstraint) solve(a *analysis, delta *nodeset) {
+ for _, x := range delta.AppendTo(a.deltaSpace) {
+ vObj := nodeid(x)
+ tDyn, slice, indirect := a.taggedValue(vObj)
+ if indirect {
+ // TODO(adonovan): we'll need to implement this
+ // when we start creating indirect tagged objects.
+ panic("indirect tagged object")
+ }
+
+ tSlice, ok := tDyn.Underlying().(*types.Slice)
+ if ok && types.Identical(tSlice.Elem(), types.Typ[types.Uint8]) {
+ if a.onlineCopy(slice, c.x) {
+ a.addWork(slice)
+ }
+ }
+ }
+}
+
+func ext۰reflect۰Value۰SetBytes(a *analysis, cgn *cgnode) {
+ params := a.funcParams(cgn.obj)
+ a.addConstraint(&rVSetBytesConstraint{
+ cgn: cgn,
+ v: params,
+ x: params + 1,
+ })
+}
+
+// ---------- func (Value).SetMapIndex(k Value, v Value) ----------
+
+// v.SetMapIndex(key, val)
+type rVSetMapIndexConstraint struct {
+ cgn *cgnode
+ v nodeid // (ptr)
+ key nodeid
+ val nodeid
+}
+
+func (c *rVSetMapIndexConstraint) ptr() nodeid { return c.v }
+func (c *rVSetMapIndexConstraint) presolve(*hvn) {}
+func (c *rVSetMapIndexConstraint) renumber(mapping []nodeid) {
+ c.v = mapping[c.v]
+ c.key = mapping[c.key]
+ c.val = mapping[c.val]
+}
+
+func (c *rVSetMapIndexConstraint) String() string {
+ return fmt.Sprintf("reflect n%d.SetMapIndex(n%d, n%d)", c.v, c.key, c.val)
+}
+
+func (c *rVSetMapIndexConstraint) solve(a *analysis, delta *nodeset) {
+ for _, x := range delta.AppendTo(a.deltaSpace) {
+ vObj := nodeid(x)
+ tDyn, m, indirect := a.taggedValue(vObj)
+ tMap, _ := tDyn.Underlying().(*types.Map)
+ if tMap == nil {
+ continue // not a map
+ }
+ if indirect {
+ // TODO(adonovan): we'll need to implement this
+ // when we start creating indirect tagged objects.
+ panic("indirect tagged object")
+ }
+
+ keysize := a.sizeof(tMap.Key())
+
+ // Extract key's payload to keytmp, then store to map key.
+ keytmp := a.addNodes(tMap.Key(), "SetMapIndex.keytmp")
+ a.typeAssert(tMap.Key(), keytmp, c.key, false)
+ a.store(m, keytmp, 0, keysize)
+
+ // Extract val's payload to vtmp, then store to map value.
+ valtmp := a.addNodes(tMap.Elem(), "SetMapIndex.valtmp")
+ a.typeAssert(tMap.Elem(), valtmp, c.val, false)
+ a.store(m, valtmp, keysize, a.sizeof(tMap.Elem()))
+ }
+}
+
+func ext۰reflect۰Value۰SetMapIndex(a *analysis, cgn *cgnode) {
+ params := a.funcParams(cgn.obj)
+ a.addConstraint(&rVSetMapIndexConstraint{
+ cgn: cgn,
+ v: params,
+ key: params + 1,
+ val: params + 2,
+ })
+}
+
+func ext۰reflect۰Value۰SetPointer(a *analysis, cgn *cgnode) {} // TODO(adonovan)
+
+// ---------- func (Value).Slice(v Value, i, j int) Value ----------
+
+// result = v.Slice(_, _)
+type rVSliceConstraint struct {
+ cgn *cgnode
+ v nodeid // (ptr)
+ result nodeid // (indirect)
+}
+
+func (c *rVSliceConstraint) ptr() nodeid { return c.v }
+func (c *rVSliceConstraint) presolve(h *hvn) {
+ h.markIndirect(onodeid(c.result), "rVSlice.result")
+}
+func (c *rVSliceConstraint) renumber(mapping []nodeid) {
+ c.v = mapping[c.v]
+ c.result = mapping[c.result]
+}
+
+func (c *rVSliceConstraint) String() string {
+ return fmt.Sprintf("n%d = reflect n%d.Slice(_, _)", c.result, c.v)
+}
+
+func (c *rVSliceConstraint) solve(a *analysis, delta *nodeset) {
+ changed := false
+ for _, x := range delta.AppendTo(a.deltaSpace) {
+ vObj := nodeid(x)
+ tDyn, payload, indirect := a.taggedValue(vObj)
+ if indirect {
+ // TODO(adonovan): we'll need to implement this
+ // when we start creating indirect tagged objects.
+ panic("indirect tagged object")
+ }
+
+ var res nodeid
+ switch t := tDyn.Underlying().(type) {
+ case *types.Pointer:
+ if tArr, ok := t.Elem().Underlying().(*types.Array); ok {
+ // pointer to array
+ res = a.makeTagged(types.NewSlice(tArr.Elem()), c.cgn, nil)
+ if a.onlineCopy(res+1, payload) {
+ a.addWork(res + 1)
+ }
+ }
+
+ case *types.Array:
+ // TODO(adonovan): implement addressable
+ // arrays when we do indirect tagged objects.
+
+ case *types.Slice:
+ res = vObj
+
+ case *types.Basic:
+ if t == types.Typ[types.String] {
+ res = vObj
+ }
+ }
+
+ if res != 0 && a.addLabel(c.result, res) {
+ changed = true
+ }
+ }
+ if changed {
+ a.addWork(c.result)
+ }
+}
+
+func ext۰reflect۰Value۰Slice(a *analysis, cgn *cgnode) {
+ a.addConstraint(&rVSliceConstraint{
+ cgn: cgn,
+ v: a.funcParams(cgn.obj),
+ result: a.funcResults(cgn.obj),
+ })
+}
+
+// -------------------- Standalone reflect functions --------------------
+
+func ext۰reflect۰Append(a *analysis, cgn *cgnode) {} // TODO(adonovan)
+func ext۰reflect۰AppendSlice(a *analysis, cgn *cgnode) {} // TODO(adonovan)
+func ext۰reflect۰Copy(a *analysis, cgn *cgnode) {} // TODO(adonovan)
+
+// ---------- func ChanOf(ChanDir, Type) Type ----------
+
+// result = ChanOf(dir, t)
+type reflectChanOfConstraint struct {
+ cgn *cgnode
+ t nodeid // (ptr)
+ result nodeid // (indirect)
+ dirs []types.ChanDir
+}
+
+func (c *reflectChanOfConstraint) ptr() nodeid { return c.t }
+func (c *reflectChanOfConstraint) presolve(h *hvn) {
+ h.markIndirect(onodeid(c.result), "reflectChanOf.result")
+}
+func (c *reflectChanOfConstraint) renumber(mapping []nodeid) {
+ c.t = mapping[c.t]
+ c.result = mapping[c.result]
+}
+
+func (c *reflectChanOfConstraint) String() string {
+ return fmt.Sprintf("n%d = reflect.ChanOf(n%d)", c.result, c.t)
+}
+
+func (c *reflectChanOfConstraint) solve(a *analysis, delta *nodeset) {
+ changed := false
+ for _, x := range delta.AppendTo(a.deltaSpace) {
+ tObj := nodeid(x)
+ T := a.rtypeTaggedValue(tObj)
+
+ if typeTooHigh(T) {
+ continue
+ }
+
+ for _, dir := range c.dirs {
+ if a.addLabel(c.result, a.makeRtype(types.NewChan(dir, T))) {
+ changed = true
+ }
+ }
+ }
+ if changed {
+ a.addWork(c.result)
+ }
+}
+
+// dirMap maps reflect.ChanDir to the set of channel types generated by ChanOf.
+var dirMap = [...][]types.ChanDir{
+ 0: {types.SendOnly, types.RecvOnly, types.SendRecv}, // unknown
+ reflect.RecvDir: {types.RecvOnly},
+ reflect.SendDir: {types.SendOnly},
+ reflect.BothDir: {types.SendRecv},
+}
+
+func ext۰reflect۰ChanOf(a *analysis, cgn *cgnode) {
+ // If we have access to the callsite,
+ // and the direction argument is a constant (as is usual),
+ // only generate the requested direction.
+ var dir reflect.ChanDir // unknown
+ if site := cgn.callersite; site != nil {
+ if c, ok := site.instr.Common().Args[0].(*ssa.Const); ok {
+ v, _ := exact.Int64Val(c.Value)
+ if 0 <= v && v <= int64(reflect.BothDir) {
+ dir = reflect.ChanDir(v)
+ }
+ }
+ }
+
+ params := a.funcParams(cgn.obj)
+ a.addConstraint(&reflectChanOfConstraint{
+ cgn: cgn,
+ t: params + 1,
+ result: a.funcResults(cgn.obj),
+ dirs: dirMap[dir],
+ })
+}
+
+// ---------- func Indirect(v Value) Value ----------
+
+// result = Indirect(v)
+type reflectIndirectConstraint struct {
+ cgn *cgnode
+ v nodeid // (ptr)
+ result nodeid // (indirect)
+}
+
+func (c *reflectIndirectConstraint) ptr() nodeid { return c.v }
+func (c *reflectIndirectConstraint) presolve(h *hvn) {
+ h.markIndirect(onodeid(c.result), "reflectIndirect.result")
+}
+func (c *reflectIndirectConstraint) renumber(mapping []nodeid) {
+ c.v = mapping[c.v]
+ c.result = mapping[c.result]
+}
+
+func (c *reflectIndirectConstraint) String() string {
+ return fmt.Sprintf("n%d = reflect.Indirect(n%d)", c.result, c.v)
+}
+
+func (c *reflectIndirectConstraint) solve(a *analysis, delta *nodeset) {
+ changed := false
+ for _, x := range delta.AppendTo(a.deltaSpace) {
+ vObj := nodeid(x)
+ tDyn, _, _ := a.taggedValue(vObj)
+ var res nodeid
+ if tPtr, ok := tDyn.Underlying().(*types.Pointer); ok {
+ // load the payload of the pointer's tagged object
+ // into a new tagged object
+ res = a.makeTagged(tPtr.Elem(), c.cgn, nil)
+ a.load(res+1, vObj+1, 0, a.sizeof(tPtr.Elem()))
+ } else {
+ res = vObj
+ }
+
+ if a.addLabel(c.result, res) {
+ changed = true
+ }
+ }
+ if changed {
+ a.addWork(c.result)
+ }
+}
+
+func ext۰reflect۰Indirect(a *analysis, cgn *cgnode) {
+ a.addConstraint(&reflectIndirectConstraint{
+ cgn: cgn,
+ v: a.funcParams(cgn.obj),
+ result: a.funcResults(cgn.obj),
+ })
+}
+
+// ---------- func MakeChan(Type) Value ----------
+
+// result = MakeChan(typ)
+type reflectMakeChanConstraint struct {
+ cgn *cgnode
+ typ nodeid // (ptr)
+ result nodeid // (indirect)
+}
+
+func (c *reflectMakeChanConstraint) ptr() nodeid { return c.typ }
+func (c *reflectMakeChanConstraint) presolve(h *hvn) {
+ h.markIndirect(onodeid(c.result), "reflectMakeChan.result")
+}
+func (c *reflectMakeChanConstraint) renumber(mapping []nodeid) {
+ c.typ = mapping[c.typ]
+ c.result = mapping[c.result]
+}
+
+func (c *reflectMakeChanConstraint) String() string {
+ return fmt.Sprintf("n%d = reflect.MakeChan(n%d)", c.result, c.typ)
+}
+
+func (c *reflectMakeChanConstraint) solve(a *analysis, delta *nodeset) {
+ changed := false
+ for _, x := range delta.AppendTo(a.deltaSpace) {
+ typObj := nodeid(x)
+ T := a.rtypeTaggedValue(typObj)
+ tChan, ok := T.Underlying().(*types.Chan)
+ if !ok || tChan.Dir() != types.SendRecv {
+ continue // not a bidirectional channel type
+ }
+
+ obj := a.nextNode()
+ a.addNodes(tChan.Elem(), "reflect.MakeChan.value")
+ a.endObject(obj, c.cgn, nil)
+
+ // put its address in a new T-tagged object
+ id := a.makeTagged(T, c.cgn, nil)
+ a.addLabel(id+1, obj)
+
+ // flow the T-tagged object to the result
+ if a.addLabel(c.result, id) {
+ changed = true
+ }
+ }
+ if changed {
+ a.addWork(c.result)
+ }
+}
+
+func ext۰reflect۰MakeChan(a *analysis, cgn *cgnode) {
+ a.addConstraint(&reflectMakeChanConstraint{
+ cgn: cgn,
+ typ: a.funcParams(cgn.obj),
+ result: a.funcResults(cgn.obj),
+ })
+}
+
+func ext۰reflect۰MakeFunc(a *analysis, cgn *cgnode) {} // TODO(adonovan)
+
+// ---------- func MakeMap(Type) Value ----------
+
+// result = MakeMap(typ)
+type reflectMakeMapConstraint struct {
+ cgn *cgnode
+ typ nodeid // (ptr)
+ result nodeid // (indirect)
+}
+
+func (c *reflectMakeMapConstraint) ptr() nodeid { return c.typ }
+func (c *reflectMakeMapConstraint) presolve(h *hvn) {
+ h.markIndirect(onodeid(c.result), "reflectMakeMap.result")
+}
+func (c *reflectMakeMapConstraint) renumber(mapping []nodeid) {
+ c.typ = mapping[c.typ]
+ c.result = mapping[c.result]
+}
+
+func (c *reflectMakeMapConstraint) String() string {
+ return fmt.Sprintf("n%d = reflect.MakeMap(n%d)", c.result, c.typ)
+}
+
+func (c *reflectMakeMapConstraint) solve(a *analysis, delta *nodeset) {
+ changed := false
+ for _, x := range delta.AppendTo(a.deltaSpace) {
+ typObj := nodeid(x)
+ T := a.rtypeTaggedValue(typObj)
+ tMap, ok := T.Underlying().(*types.Map)
+ if !ok {
+ continue // not a map type
+ }
+
+ mapObj := a.nextNode()
+ a.addNodes(tMap.Key(), "reflect.MakeMap.key")
+ a.addNodes(tMap.Elem(), "reflect.MakeMap.value")
+ a.endObject(mapObj, c.cgn, nil)
+
+ // put its address in a new T-tagged object
+ id := a.makeTagged(T, c.cgn, nil)
+ a.addLabel(id+1, mapObj)
+
+ // flow the T-tagged object to the result
+ if a.addLabel(c.result, id) {
+ changed = true
+ }
+ }
+ if changed {
+ a.addWork(c.result)
+ }
+}
+
+func ext۰reflect۰MakeMap(a *analysis, cgn *cgnode) {
+ a.addConstraint(&reflectMakeMapConstraint{
+ cgn: cgn,
+ typ: a.funcParams(cgn.obj),
+ result: a.funcResults(cgn.obj),
+ })
+}
+
+// ---------- func MakeSlice(Type) Value ----------
+
+// result = MakeSlice(typ)
+type reflectMakeSliceConstraint struct {
+ cgn *cgnode
+ typ nodeid // (ptr)
+ result nodeid // (indirect)
+}
+
+func (c *reflectMakeSliceConstraint) ptr() nodeid { return c.typ }
+func (c *reflectMakeSliceConstraint) presolve(h *hvn) {
+ h.markIndirect(onodeid(c.result), "reflectMakeSlice.result")
+}
+func (c *reflectMakeSliceConstraint) renumber(mapping []nodeid) {
+ c.typ = mapping[c.typ]
+ c.result = mapping[c.result]
+}
+
+func (c *reflectMakeSliceConstraint) String() string {
+ return fmt.Sprintf("n%d = reflect.MakeSlice(n%d)", c.result, c.typ)
+}
+
+func (c *reflectMakeSliceConstraint) solve(a *analysis, delta *nodeset) {
+ changed := false
+ for _, x := range delta.AppendTo(a.deltaSpace) {
+ typObj := nodeid(x)
+ T := a.rtypeTaggedValue(typObj)
+ if _, ok := T.Underlying().(*types.Slice); !ok {
+ continue // not a slice type
+ }
+
+ obj := a.nextNode()
+ a.addNodes(sliceToArray(T), "reflect.MakeSlice")
+ a.endObject(obj, c.cgn, nil)
+
+ // put its address in a new T-tagged object
+ id := a.makeTagged(T, c.cgn, nil)
+ a.addLabel(id+1, obj)
+
+ // flow the T-tagged object to the result
+ if a.addLabel(c.result, id) {
+ changed = true
+ }
+ }
+ if changed {
+ a.addWork(c.result)
+ }
+}
+
+func ext۰reflect۰MakeSlice(a *analysis, cgn *cgnode) {
+ a.addConstraint(&reflectMakeSliceConstraint{
+ cgn: cgn,
+ typ: a.funcParams(cgn.obj),
+ result: a.funcResults(cgn.obj),
+ })
+}
+
+func ext۰reflect۰MapOf(a *analysis, cgn *cgnode) {} // TODO(adonovan)
+
+// ---------- func New(Type) Value ----------
+
+// result = New(typ)
+type reflectNewConstraint struct {
+ cgn *cgnode
+ typ nodeid // (ptr)
+ result nodeid // (indirect)
+}
+
+func (c *reflectNewConstraint) ptr() nodeid { return c.typ }
+func (c *reflectNewConstraint) presolve(h *hvn) {
+ h.markIndirect(onodeid(c.result), "reflectNew.result")
+}
+func (c *reflectNewConstraint) renumber(mapping []nodeid) {
+ c.typ = mapping[c.typ]
+ c.result = mapping[c.result]
+}
+
+func (c *reflectNewConstraint) String() string {
+ return fmt.Sprintf("n%d = reflect.New(n%d)", c.result, c.typ)
+}
+
+func (c *reflectNewConstraint) solve(a *analysis, delta *nodeset) {
+ changed := false
+ for _, x := range delta.AppendTo(a.deltaSpace) {
+ typObj := nodeid(x)
+ T := a.rtypeTaggedValue(typObj)
+
+ // allocate new T object
+ newObj := a.nextNode()
+ a.addNodes(T, "reflect.New")
+ a.endObject(newObj, c.cgn, nil)
+
+ // put its address in a new *T-tagged object
+ id := a.makeTagged(types.NewPointer(T), c.cgn, nil)
+ a.addLabel(id+1, newObj)
+
+ // flow the pointer to the result
+ if a.addLabel(c.result, id) {
+ changed = true
+ }
+ }
+ if changed {
+ a.addWork(c.result)
+ }
+}
+
+func ext۰reflect۰New(a *analysis, cgn *cgnode) {
+ a.addConstraint(&reflectNewConstraint{
+ cgn: cgn,
+ typ: a.funcParams(cgn.obj),
+ result: a.funcResults(cgn.obj),
+ })
+}
+
+func ext۰reflect۰NewAt(a *analysis, cgn *cgnode) {
+ ext۰reflect۰New(a, cgn)
+
+ // TODO(adonovan): also report dynamic calls to unsound intrinsics.
+ if site := cgn.callersite; site != nil {
+ a.warnf(site.pos(), "unsound: %s contains a reflect.NewAt() call", site.instr.Parent())
+ }
+}
+
+// ---------- func PtrTo(Type) Type ----------
+
+// result = PtrTo(t)
+type reflectPtrToConstraint struct {
+ cgn *cgnode
+ t nodeid // (ptr)
+ result nodeid // (indirect)
+}
+
+func (c *reflectPtrToConstraint) ptr() nodeid { return c.t }
+func (c *reflectPtrToConstraint) presolve(h *hvn) {
+ h.markIndirect(onodeid(c.result), "reflectPtrTo.result")
+}
+func (c *reflectPtrToConstraint) renumber(mapping []nodeid) {
+ c.t = mapping[c.t]
+ c.result = mapping[c.result]
+}
+
+func (c *reflectPtrToConstraint) String() string {
+ return fmt.Sprintf("n%d = reflect.PtrTo(n%d)", c.result, c.t)
+}
+
+func (c *reflectPtrToConstraint) solve(a *analysis, delta *nodeset) {
+ changed := false
+ for _, x := range delta.AppendTo(a.deltaSpace) {
+ tObj := nodeid(x)
+ T := a.rtypeTaggedValue(tObj)
+
+ if typeTooHigh(T) {
+ continue
+ }
+
+ if a.addLabel(c.result, a.makeRtype(types.NewPointer(T))) {
+ changed = true
+ }
+ }
+ if changed {
+ a.addWork(c.result)
+ }
+}
+
+func ext۰reflect۰PtrTo(a *analysis, cgn *cgnode) {
+ a.addConstraint(&reflectPtrToConstraint{
+ cgn: cgn,
+ t: a.funcParams(cgn.obj),
+ result: a.funcResults(cgn.obj),
+ })
+}
+
+func ext۰reflect۰Select(a *analysis, cgn *cgnode) {} // TODO(adonovan)
+
+// ---------- func SliceOf(Type) Type ----------
+
+// result = SliceOf(t)
+type reflectSliceOfConstraint struct {
+ cgn *cgnode
+ t nodeid // (ptr)
+ result nodeid // (indirect)
+}
+
+func (c *reflectSliceOfConstraint) ptr() nodeid { return c.t }
+func (c *reflectSliceOfConstraint) presolve(h *hvn) {
+ h.markIndirect(onodeid(c.result), "reflectSliceOf.result")
+}
+func (c *reflectSliceOfConstraint) renumber(mapping []nodeid) {
+ c.t = mapping[c.t]
+ c.result = mapping[c.result]
+}
+
+func (c *reflectSliceOfConstraint) String() string {
+ return fmt.Sprintf("n%d = reflect.SliceOf(n%d)", c.result, c.t)
+}
+
+func (c *reflectSliceOfConstraint) solve(a *analysis, delta *nodeset) {
+ changed := false
+ for _, x := range delta.AppendTo(a.deltaSpace) {
+ tObj := nodeid(x)
+ T := a.rtypeTaggedValue(tObj)
+
+ if typeTooHigh(T) {
+ continue
+ }
+
+ if a.addLabel(c.result, a.makeRtype(types.NewSlice(T))) {
+ changed = true
+ }
+ }
+ if changed {
+ a.addWork(c.result)
+ }
+}
+
+func ext۰reflect۰SliceOf(a *analysis, cgn *cgnode) {
+ a.addConstraint(&reflectSliceOfConstraint{
+ cgn: cgn,
+ t: a.funcParams(cgn.obj),
+ result: a.funcResults(cgn.obj),
+ })
+}
+
+// ---------- func TypeOf(v Value) Type ----------
+
+// result = TypeOf(i)
+type reflectTypeOfConstraint struct {
+ cgn *cgnode
+ i nodeid // (ptr)
+ result nodeid // (indirect)
+}
+
+func (c *reflectTypeOfConstraint) ptr() nodeid { return c.i }
+func (c *reflectTypeOfConstraint) presolve(h *hvn) {
+ h.markIndirect(onodeid(c.result), "reflectTypeOf.result")
+}
+func (c *reflectTypeOfConstraint) renumber(mapping []nodeid) {
+ c.i = mapping[c.i]
+ c.result = mapping[c.result]
+}
+
+func (c *reflectTypeOfConstraint) String() string {
+ return fmt.Sprintf("n%d = reflect.TypeOf(n%d)", c.result, c.i)
+}
+
+func (c *reflectTypeOfConstraint) solve(a *analysis, delta *nodeset) {
+ changed := false
+ for _, x := range delta.AppendTo(a.deltaSpace) {
+ iObj := nodeid(x)
+ tDyn, _, _ := a.taggedValue(iObj)
+ if a.addLabel(c.result, a.makeRtype(tDyn)) {
+ changed = true
+ }
+ }
+ if changed {
+ a.addWork(c.result)
+ }
+}
+
+func ext۰reflect۰TypeOf(a *analysis, cgn *cgnode) {
+ a.addConstraint(&reflectTypeOfConstraint{
+ cgn: cgn,
+ i: a.funcParams(cgn.obj),
+ result: a.funcResults(cgn.obj),
+ })
+}
+
+// ---------- func ValueOf(interface{}) Value ----------
+
+func ext۰reflect۰ValueOf(a *analysis, cgn *cgnode) {
+ // TODO(adonovan): when we start creating indirect tagged
+ // objects, we'll need to handle them specially here since
+ // they must never appear in the PTS of an interface{}.
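+ //
+ // A reflect.Value, like an interface{}, occupies a single node whose
+ // points-to set holds tagged objects, so a one-node copy from the
+ // parameter to the result suffices to model ValueOf.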
+ a.copy(a.funcResults(cgn.obj), a.funcParams(cgn.obj), 1)
+}
+
+// ---------- func Zero(Type) Value ----------
+
+// result = Zero(typ)
+type reflectZeroConstraint struct {
+ cgn *cgnode
+ typ nodeid // (ptr)
+ result nodeid // (indirect)
+}
+
+func (c *reflectZeroConstraint) ptr() nodeid { return c.typ }
+func (c *reflectZeroConstraint) presolve(h *hvn) {
+ h.markIndirect(onodeid(c.result), "reflectZero.result")
+}
+func (c *reflectZeroConstraint) renumber(mapping []nodeid) {
+ c.typ = mapping[c.typ]
+ c.result = mapping[c.result]
+}
+
+func (c *reflectZeroConstraint) String() string {
+ return fmt.Sprintf("n%d = reflect.Zero(n%d)", c.result, c.typ)
+}
+
+func (c *reflectZeroConstraint) solve(a *analysis, delta *nodeset) {
+ changed := false
+ for _, x := range delta.AppendTo(a.deltaSpace) {
+ typObj := nodeid(x)
+ T := a.rtypeTaggedValue(typObj)
+
+ // TODO(adonovan): if T is an interface type, we need
+ // to create an indirect tagged object containing
+ // new(T). To avoid updates of such shared values,
+ // we'll need another flag on indirect tagged objects
+ // that marks whether they are addressable or
+ // readonly, just like the reflect package does.
+
+ // memoize using a.reflectZeros[T] (the lookup below is currently disabled by 'false &&')
+ var id nodeid
+ if z := a.reflectZeros.At(T); false && z != nil {
+ id = z.(nodeid)
+ } else {
+ id = a.makeTagged(T, c.cgn, nil)
+ a.reflectZeros.Set(T, id)
+ }
+ if a.addLabel(c.result, id) {
+ changed = true
+ }
+ }
+ if changed {
+ a.addWork(c.result)
+ }
+}
+
+func ext۰reflect۰Zero(a *analysis, cgn *cgnode) {
+ a.addConstraint(&reflectZeroConstraint{
+ cgn: cgn,
+ typ: a.funcParams(cgn.obj),
+ result: a.funcResults(cgn.obj),
+ })
+}
+
+// -------------------- (*reflect.rtype) methods --------------------
+
+// ---------- func (*rtype) Elem() Type ----------
+
+// result = Elem(t)
+type rtypeElemConstraint struct {
+ cgn *cgnode
+ t nodeid // (ptr)
+ result nodeid // (indirect)
+}
+
+func (c *rtypeElemConstraint) ptr() nodeid { return c.t }
+func (c *rtypeElemConstraint) presolve(h *hvn) {
+ h.markIndirect(onodeid(c.result), "rtypeElem.result")
+}
+func (c *rtypeElemConstraint) renumber(mapping []nodeid) {
+ c.t = mapping[c.t]
+ c.result = mapping[c.result]
+}
+
+func (c *rtypeElemConstraint) String() string {
+ return fmt.Sprintf("n%d = (*reflect.rtype).Elem(n%d)", c.result, c.t)
+}
+
+func (c *rtypeElemConstraint) solve(a *analysis, delta *nodeset) {
+ // Implemented by *types.{Map,Chan,Array,Slice,Pointer}.
+ type hasElem interface {
+ Elem() types.Type
+ }
+ changed := false
+ for _, x := range delta.AppendTo(a.deltaSpace) {
+ tObj := nodeid(x)
+ T := a.nodes[tObj].obj.data.(types.Type)
+ if tHasElem, ok := T.Underlying().(hasElem); ok {
+ if a.addLabel(c.result, a.makeRtype(tHasElem.Elem())) {
+ changed = true
+ }
+ }
+ }
+ if changed {
+ a.addWork(c.result)
+ }
+}
+
+func ext۰reflect۰rtype۰Elem(a *analysis, cgn *cgnode) {
+ a.addConstraint(&rtypeElemConstraint{
+ cgn: cgn,
+ t: a.funcParams(cgn.obj),
+ result: a.funcResults(cgn.obj),
+ })
+}
+
+// ---------- func (*rtype) Field(int) StructField ----------
+// ---------- func (*rtype) FieldByName(string) (StructField, bool) ----------
+
+// result = FieldByName(t, name)
+// result = Field(t, _)
+type rtypeFieldByNameConstraint struct {
+ cgn *cgnode
+ name string // name of field; "" for unknown
+ t nodeid // (ptr)
+ result nodeid // (indirect)
+}
+
+func (c *rtypeFieldByNameConstraint) ptr() nodeid { return c.t }
+func (c *rtypeFieldByNameConstraint) presolve(h *hvn) {
+ h.markIndirect(onodeid(c.result+3), "rtypeFieldByName.result.Type")
+}
+func (c *rtypeFieldByNameConstraint) renumber(mapping []nodeid) {
+ c.t = mapping[c.t]
+ c.result = mapping[c.result]
+}
+
+func (c *rtypeFieldByNameConstraint) String() string {
+ return fmt.Sprintf("n%d = (*reflect.rtype).FieldByName(n%d, %q)", c.result, c.t, c.name)
+}
+
+func (c *rtypeFieldByNameConstraint) solve(a *analysis, delta *nodeset) {
+ // type StructField struct {
+ // 0 __identity__
+ // 1 Name string
+ // 2 PkgPath string
+ // 3 Type Type
+ // 4 Tag StructTag
+ // 5 Offset uintptr
+ // 6 Index []int
+ // 7 Anonymous bool
+ // }
+
+ for _, x := range delta.AppendTo(a.deltaSpace) {
+ tObj := nodeid(x)
+ T := a.nodes[tObj].obj.data.(types.Type)
+ tStruct, ok := T.Underlying().(*types.Struct)
+ if !ok {
+ continue // not a struct type
+ }
+
+ n := tStruct.NumFields()
+ for i := 0; i < n; i++ {
+ f := tStruct.Field(i)
+ if c.name == "" || c.name == f.Name() {
+
+ // a.offsetOf(Type) is 3.
+ if id := c.result + 3; a.addLabel(id, a.makeRtype(f.Type())) {
+ a.addWork(id)
+ }
+ // TODO(adonovan): StructField.Index should be non-nil.
+ }
+ }
+ }
+}
+
+func ext۰reflect۰rtype۰FieldByName(a *analysis, cgn *cgnode) {
+ // If we have access to the callsite,
+ // and the argument is a string constant,
+ // return only that field.
+ var name string
+ if site := cgn.callersite; site != nil {
+ if c, ok := site.instr.Common().Args[0].(*ssa.Const); ok {
+ name = exact.StringVal(c.Value)
+ }
+ }
+
+ a.addConstraint(&rtypeFieldByNameConstraint{
+ cgn: cgn,
+ name: name,
+ t: a.funcParams(cgn.obj),
+ result: a.funcResults(cgn.obj),
+ })
+}
+
+func ext۰reflect۰rtype۰Field(a *analysis, cgn *cgnode) {
+ // No-one ever calls Field with a constant argument,
+ // so we don't specialize that case.
+ a.addConstraint(&rtypeFieldByNameConstraint{
+ cgn: cgn,
+ t: a.funcParams(cgn.obj),
+ result: a.funcResults(cgn.obj),
+ })
+}
+
+func ext۰reflect۰rtype۰FieldByIndex(a *analysis, cgn *cgnode) {} // TODO(adonovan)
+func ext۰reflect۰rtype۰FieldByNameFunc(a *analysis, cgn *cgnode) {} // TODO(adonovan)
+
+// ---------- func (*rtype) In/Out(i int) Type ----------
+
+// result = In/Out(t, i)
+type rtypeInOutConstraint struct {
+ cgn *cgnode
+ t nodeid // (ptr)
+ result nodeid // (indirect)
+ out bool
+ i int // -ve if not a constant
+}
+
+func (c *rtypeInOutConstraint) ptr() nodeid { return c.t }
+func (c *rtypeInOutConstraint) presolve(h *hvn) {
+ h.markIndirect(onodeid(c.result), "rtypeInOut.result")
+}
+func (c *rtypeInOutConstraint) renumber(mapping []nodeid) {
+ c.t = mapping[c.t]
+ c.result = mapping[c.result]
+}
+
+func (c *rtypeInOutConstraint) String() string {
+ return fmt.Sprintf("n%d = (*reflect.rtype).InOut(n%d, %d)", c.result, c.t, c.i)
+}
+
+func (c *rtypeInOutConstraint) solve(a *analysis, delta *nodeset) {
+ changed := false
+ for _, x := range delta.AppendTo(a.deltaSpace) {
+ tObj := nodeid(x)
+ T := a.nodes[tObj].obj.data.(types.Type)
+ sig, ok := T.Underlying().(*types.Signature)
+ if !ok {
+ continue // not a func type
+ }
+
+ tuple := sig.Params()
+ if c.out {
+ tuple = sig.Results()
+ }
+ for i, n := 0, tuple.Len(); i < n; i++ {
+ if c.i < 0 || c.i == i {
+ if a.addLabel(c.result, a.makeRtype(tuple.At(i).Type())) {
+ changed = true
+ }
+ }
+ }
+ }
+ if changed {
+ a.addWork(c.result)
+ }
+}
+
+func ext۰reflect۰rtype۰InOut(a *analysis, cgn *cgnode, out bool) {
+ // If we have access to the callsite,
+ // and the argument is an int constant,
+ // return only that parameter.
+ index := -1
+ if site := cgn.callersite; site != nil {
+ if c, ok := site.instr.Common().Args[0].(*ssa.Const); ok {
+ v, _ := exact.Int64Val(c.Value)
+ index = int(v)
+ }
+ }
+ a.addConstraint(&rtypeInOutConstraint{
+ cgn: cgn,
+ t: a.funcParams(cgn.obj),
+ result: a.funcResults(cgn.obj),
+ out: out,
+ i: index,
+ })
+}
+
+func ext۰reflect۰rtype۰In(a *analysis, cgn *cgnode) {
+ ext۰reflect۰rtype۰InOut(a, cgn, false)
+}
+
+func ext۰reflect۰rtype۰Out(a *analysis, cgn *cgnode) {
+ ext۰reflect۰rtype۰InOut(a, cgn, true)
+}
+
+// ---------- func (*rtype) Key() Type ----------
+
+// result = Key(t)
+type rtypeKeyConstraint struct {
+ cgn *cgnode
+ t nodeid // (ptr)
+ result nodeid // (indirect)
+}
+
+func (c *rtypeKeyConstraint) ptr() nodeid { return c.t }
+func (c *rtypeKeyConstraint) presolve(h *hvn) {
+ h.markIndirect(onodeid(c.result), "rtypeKey.result")
+}
+func (c *rtypeKeyConstraint) renumber(mapping []nodeid) {
+ c.t = mapping[c.t]
+ c.result = mapping[c.result]
+}
+
+func (c *rtypeKeyConstraint) String() string {
+ return fmt.Sprintf("n%d = (*reflect.rtype).Key(n%d)", c.result, c.t)
+}
+
+func (c *rtypeKeyConstraint) solve(a *analysis, delta *nodeset) {
+ changed := false
+ for _, x := range delta.AppendTo(a.deltaSpace) {
+ tObj := nodeid(x)
+ T := a.nodes[tObj].obj.data.(types.Type)
+ if tMap, ok := T.Underlying().(*types.Map); ok {
+ if a.addLabel(c.result, a.makeRtype(tMap.Key())) {
+ changed = true
+ }
+ }
+ }
+ if changed {
+ a.addWork(c.result)
+ }
+}
+
+func ext۰reflect۰rtype۰Key(a *analysis, cgn *cgnode) {
+ a.addConstraint(&rtypeKeyConstraint{
+ cgn: cgn,
+ t: a.funcParams(cgn.obj),
+ result: a.funcResults(cgn.obj),
+ })
+}
+
+// ---------- func (*rtype) Method(int) (Method, bool) ----------
+// ---------- func (*rtype) MethodByName(string) (Method, bool) ----------
+
+// result = MethodByName(t, name)
+// result = Method(t, _)
+type rtypeMethodByNameConstraint struct {
+ cgn *cgnode
+ name string // name of method; "" for unknown
+ t nodeid // (ptr)
+ result nodeid // (indirect)
+}
+
+func (c *rtypeMethodByNameConstraint) ptr() nodeid { return c.t }
+func (c *rtypeMethodByNameConstraint) presolve(h *hvn) {
+ h.markIndirect(onodeid(c.result+3), "rtypeMethodByName.result.Type")
+ h.markIndirect(onodeid(c.result+4), "rtypeMethodByName.result.Func")
+}
+func (c *rtypeMethodByNameConstraint) renumber(mapping []nodeid) {
+ c.t = mapping[c.t]
+ c.result = mapping[c.result]
+}
+
+func (c *rtypeMethodByNameConstraint) String() string {
+ return fmt.Sprintf("n%d = (*reflect.rtype).MethodByName(n%d, %q)", c.result, c.t, c.name)
+}
+
+// changeRecv returns sig with Recv prepended to Params().
+func changeRecv(sig *types.Signature) *types.Signature {
+ params := sig.Params()
+ n := params.Len()
+ p2 := make([]*types.Var, n+1)
+ p2[0] = sig.Recv()
+ for i := 0; i < n; i++ {
+ p2[i+1] = params.At(i)
+ }
+ return types.NewSignature(nil, nil, types.NewTuple(p2...), sig.Results(), sig.Variadic())
+}
+
+func (c *rtypeMethodByNameConstraint) solve(a *analysis, delta *nodeset) {
+ for _, x := range delta.AppendTo(a.deltaSpace) {
+ tObj := nodeid(x)
+ T := a.nodes[tObj].obj.data.(types.Type)
+
+ isIface := isInterface(T)
+
+ // We don't use Lookup(c.name) when c.name != "" to avoid
+ // ambiguity: >1 unexported methods could match.
+ mset := a.prog.MethodSets.MethodSet(T)
+ for i, n := 0, mset.Len(); i < n; i++ {
+ sel := mset.At(i)
+ if c.name == "" || c.name == sel.Obj().Name() {
+ // type Method struct {
+ // 0 __identity__
+ // 1 Name string
+ // 2 PkgPath string
+ // 3 Type Type
+ // 4 Func Value
+ // 5 Index int
+ // }
+
+ var sig *types.Signature
+ var fn *ssa.Function
+ if isIface {
+ sig = sel.Type().(*types.Signature)
+ } else {
+ fn = a.prog.Method(sel)
+ // move receiver to params[0]
+ sig = changeRecv(fn.Signature)
+ }
+
+ // a.offsetOf(Type) is 3.
+ if id := c.result + 3; a.addLabel(id, a.makeRtype(sig)) {
+ a.addWork(id)
+ }
+ if fn != nil {
+ // a.offsetOf(Func) is 4.
+ if id := c.result + 4; a.addLabel(id, a.objectNode(nil, fn)) {
+ a.addWork(id)
+ }
+ }
+ }
+ }
+ }
+}
+
+func ext۰reflect۰rtype۰MethodByName(a *analysis, cgn *cgnode) {
+ // If we have access to the callsite,
+ // and the argument is a string constant,
+ // return only that method.
+ var name string
+ if site := cgn.callersite; site != nil {
+ if c, ok := site.instr.Common().Args[0].(*ssa.Const); ok {
+ name = exact.StringVal(c.Value)
+ }
+ }
+
+ a.addConstraint(&rtypeMethodByNameConstraint{
+ cgn: cgn,
+ name: name,
+ t: a.funcParams(cgn.obj),
+ result: a.funcResults(cgn.obj),
+ })
+}
+
+func ext۰reflect۰rtype۰Method(a *analysis, cgn *cgnode) {
+ // No-one ever calls Method with a constant argument,
+ // so we don't specialize that case.
+ a.addConstraint(&rtypeMethodByNameConstraint{
+ cgn: cgn,
+ t: a.funcParams(cgn.obj),
+ result: a.funcResults(cgn.obj),
+ })
+}
+
+// typeHeight returns the "height" of the type, which is roughly
+// speaking the number of chan, map, pointer and slice type constructors
+// at the root of T; these are the four type kinds that can be created
+// via reflection. Chan and map constructors are counted as double the
+// height of slice and pointer constructors since they are less often
+// deeply nested.
+//
+// The solver rules for type constructors must somehow bound the set of
+// types they create to ensure termination of the algorithm in cases
+// where the output of a type constructor flows to its input, e.g.
+//
+// func f(t reflect.Type) {
+// f(reflect.PtrTo(t))
+// }
+//
+// It does this by limiting the type height to k, but this still leaves
+// a potentially exponential (4^k) number of types that may be
+// enumerated in pathological cases.
+//
+func typeHeight(T types.Type) int {
+ switch T := T.(type) {
+ case *types.Chan:
+ return 2 + typeHeight(T.Elem())
+ case *types.Map:
+ k := typeHeight(T.Key())
+ v := typeHeight(T.Elem())
+ if v > k {
+ k = v // max(k, v)
+ }
+ return 2 + k
+ case *types.Slice:
+ return 1 + typeHeight(T.Elem())
+ case *types.Pointer:
+ return 1 + typeHeight(T.Elem())
+ }
+ return 0
+}
+
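+// typeTooHigh reports whether T is too "high" (see typeHeight) for the
+// type-constructor intrinsics to derive further types from it. For
+// example, typeHeight(*[]chan int) = 1+1+2 = 4 > 3, so ChanOf, PtrTo
+// and SliceOf skip such an input rather than construct an even deeper
+// type.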
+func typeTooHigh(T types.Type) bool {
+ return typeHeight(T) > 3
+}
diff --git a/llgo/third_party/go.tools/go/pointer/solve.go b/llgo/third_party/go.tools/go/pointer/solve.go
new file mode 100644
index 0000000000000000000000000000000000000000..81d24da1666d6bad444631b52a7006bc08c30164
--- /dev/null
+++ b/llgo/third_party/go.tools/go/pointer/solve.go
@@ -0,0 +1,371 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pointer
+
+// This file defines a naive Andersen-style solver for the inclusion
+// constraint system.
+
+import (
+ "fmt"
+
+ "llvm.org/llgo/third_party/go.tools/go/types"
+)
+
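+// solverState holds the per-node working state of the solver. On each
+// visit of a node, the difference pts - prevPTS forms the delta that
+// is propagated to the node's complex constraints and copy edges.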
+type solverState struct {
+ complex []constraint // complex constraints attached to this node
+ copyTo nodeset // simple copy constraint edges
+ pts nodeset // points-to set of this node
+ prevPTS nodeset // pts(n) in previous iteration (for difference propagation)
+}
+
+func (a *analysis) solve() {
+ start("Solving")
+ if a.log != nil {
+ fmt.Fprintf(a.log, "\n\n==== Solving constraints\n\n")
+ }
+
+ // Solver main loop.
+ var delta nodeset
+ for {
+ // Add new constraints to the graph:
+ // static constraints from SSA on round 1,
+ // dynamic constraints from reflection thereafter.
+ a.processNewConstraints()
+
+ var x int
+ if !a.work.TakeMin(&x) {
+ break // empty
+ }
+ id := nodeid(x)
+ if a.log != nil {
+ fmt.Fprintf(a.log, "\tnode n%d\n", id)
+ }
+
+ n := a.nodes[id]
+
+ // Difference propagation.
+ delta.Difference(&n.solve.pts.Sparse, &n.solve.prevPTS.Sparse)
+ if delta.IsEmpty() {
+ continue
+ }
+ if a.log != nil {
+ fmt.Fprintf(a.log, "\t\tpts(n%d : %s) = %s + %s\n",
+ id, n.typ, &delta, &n.solve.prevPTS)
+ }
+ n.solve.prevPTS.Copy(&n.solve.pts.Sparse)
+
+ // Apply all resolution rules attached to n.
+ a.solveConstraints(n, &delta)
+
+ if a.log != nil {
+ fmt.Fprintf(a.log, "\t\tpts(n%d) = %s\n", id, &n.solve.pts)
+ }
+ }
+
+ if !a.nodes[0].solve.pts.IsEmpty() {
+ panic(fmt.Sprintf("pts(0) is nonempty: %s", &a.nodes[0].solve.pts))
+ }
+
+ // Release working state (but keep final PTS).
+ for _, n := range a.nodes {
+ n.solve.complex = nil
+ n.solve.copyTo.Clear()
+ n.solve.prevPTS.Clear()
+ }
+
+ if a.log != nil {
+ fmt.Fprintf(a.log, "Solver done\n")
+
+ // Dump solution.
+ for i, n := range a.nodes {
+ if !n.solve.pts.IsEmpty() {
+ fmt.Fprintf(a.log, "pts(n%d) = %s : %s\n", i, &n.solve.pts, n.typ)
+ }
+ }
+ }
+ stop("Solving")
+}
+
+// processNewConstraints takes the new constraints from a.constraints
+// and adds them to the graph, ensuring
+// that new constraints are applied to pre-existing labels and
+// that pre-existing constraints are applied to new labels.
+//
+func (a *analysis) processNewConstraints() {
+ // Take the slice of new constraints.
+ // (May grow during call to solveConstraints.)
+ constraints := a.constraints
+ a.constraints = nil
+
+ // Initialize points-to sets from addr-of (base) constraints.
+ for _, c := range constraints {
+ if c, ok := c.(*addrConstraint); ok {
+ dst := a.nodes[c.dst]
+ dst.solve.pts.add(c.src)
+
+ // Populate the worklist with nodes that point to
+ // something initially (due to addrConstraints) and
+ // have other constraints attached.
+ // (A no-op in round 1.)
+ if !dst.solve.copyTo.IsEmpty() || len(dst.solve.complex) > 0 {
+ a.addWork(c.dst)
+ }
+ }
+ }
+
+ // Attach simple (copy) and complex constraints to nodes.
+ var stale nodeset
+ for _, c := range constraints {
+ var id nodeid
+ switch c := c.(type) {
+ case *addrConstraint:
+ // base constraints handled in previous loop
+ continue
+ case *copyConstraint:
+ // simple (copy) constraint
+ id = c.src
+ a.nodes[id].solve.copyTo.add(c.dst)
+ default:
+ // complex constraint
+ id = c.ptr()
+ solve := a.nodes[id].solve
+ solve.complex = append(solve.complex, c)
+ }
+
+ if n := a.nodes[id]; !n.solve.pts.IsEmpty() {
+ if !n.solve.prevPTS.IsEmpty() {
+ stale.add(id)
+ }
+ a.addWork(id)
+ }
+ }
+ // Apply new constraints to pre-existing PTS labels.
+ var space [50]int
+ for _, id := range stale.AppendTo(space[:0]) {
+ n := a.nodes[nodeid(id)]
+ a.solveConstraints(n, &n.solve.prevPTS)
+ }
+}
+
+// solveConstraints applies each resolution rule attached to node n to
+// the set of labels delta. It may generate new constraints in
+// a.constraints.
+//
+func (a *analysis) solveConstraints(n *node, delta *nodeset) {
+ if delta.IsEmpty() {
+ return
+ }
+
+ // Process complex constraints dependent on n.
+ for _, c := range n.solve.complex {
+ if a.log != nil {
+ fmt.Fprintf(a.log, "\t\tconstraint %s\n", c)
+ }
+ c.solve(a, delta)
+ }
+
+ // Process copy constraints.
+ var copySeen nodeset
+ for _, x := range n.solve.copyTo.AppendTo(a.deltaSpace) {
+ mid := nodeid(x)
+ if copySeen.add(mid) {
+ if a.nodes[mid].solve.pts.addAll(delta) {
+ a.addWork(mid)
+ }
+ }
+ }
+}
+
+// addLabel adds label to the points-to set of ptr and reports whether the set grew.
+func (a *analysis) addLabel(ptr, label nodeid) bool {
+ b := a.nodes[ptr].solve.pts.add(label)
+ if b && a.log != nil {
+ fmt.Fprintf(a.log, "\t\tpts(n%d) += n%d\n", ptr, label)
+ }
+ return b
+}
+
+func (a *analysis) addWork(id nodeid) {
+ a.work.Insert(int(id))
+ if a.log != nil {
+ fmt.Fprintf(a.log, "\t\twork: n%d\n", id)
+ }
+}
+
+// onlineCopy adds a copy edge. It is called online, i.e. during
+// solving, so it adds edges and pts members directly rather than by
+// instantiating a 'constraint'.
+//
+// The size of the copy is implicitly 1.
+// It returns true if pts(dst) changed.
+//
+func (a *analysis) onlineCopy(dst, src nodeid) bool {
+ if dst != src {
+ if nsrc := a.nodes[src]; nsrc.solve.copyTo.add(dst) {
+ if a.log != nil {
+ fmt.Fprintf(a.log, "\t\t\tdynamic copy n%d <- n%d\n", dst, src)
+ }
+ // TODO(adonovan): most calls to onlineCopy
+ // are followed by addWork, possibly batched
+ // via a 'changed' flag; see if there's a
+ // noticeable penalty to calling addWork here.
+ return a.nodes[dst].solve.pts.addAll(&nsrc.solve.pts)
+ }
+ }
+ return false
+}
+
+// onlineCopyN copies the points-to sets of the sizeof nodes starting at
+// src to the corresponding nodes starting at dst, adding each changed
+// destination node to the worklist. It returns sizeof.
+//
+// TODO(adonovan): now that we support a.copy() during solving, we
+// could eliminate onlineCopyN, but it's much slower. Investigate.
+//
+func (a *analysis) onlineCopyN(dst, src nodeid, sizeof uint32) uint32 {
+ for i := uint32(0); i < sizeof; i++ {
+ if a.onlineCopy(dst, src) {
+ a.addWork(dst)
+ }
+ src++
+ dst++
+ }
+ return sizeof
+}
+
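+// solve implements the load constraint dst = src[offset]: for each object
+// k newly added to pts(src) (delta), it adds a copy edge to dst from the
+// node at k+offset, enqueuing dst if its points-to set grows.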
+func (c *loadConstraint) solve(a *analysis, delta *nodeset) {
+ var changed bool
+ for _, x := range delta.AppendTo(a.deltaSpace) {
+ k := nodeid(x)
+ koff := k + nodeid(c.offset)
+ if a.onlineCopy(c.dst, koff) {
+ changed = true
+ }
+ }
+ if changed {
+ a.addWork(c.dst)
+ }
+}
+
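+// solve implements the store constraint dst[offset] = src: for each object
+// k newly added to pts(dst) (delta), it adds a copy edge from src to the
+// node at k+offset, enqueuing that node if its points-to set grows.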
+func (c *storeConstraint) solve(a *analysis, delta *nodeset) {
+ for _, x := range delta.AppendTo(a.deltaSpace) {
+ k := nodeid(x)
+ koff := k + nodeid(c.offset)
+ if a.onlineCopy(koff, c.src) {
+ a.addWork(koff)
+ }
+ }
+}
+
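+// solve implements dst = &src.f (a field or element offset within an
+// object): for each object k newly added to pts(src) (delta), the object
+// node k+offset is added to pts(dst).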
+func (c *offsetAddrConstraint) solve(a *analysis, delta *nodeset) {
+ dst := a.nodes[c.dst]
+ for _, x := range delta.AppendTo(a.deltaSpace) {
+ k := nodeid(x)
+ if dst.solve.pts.add(k + nodeid(c.offset)) {
+ a.addWork(c.dst)
+ }
+ }
+}
+
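+// solve implements dst = src.(T) where T is an interface type: each tagged
+// object newly added to pts(src) (delta) flows to dst only if its dynamic
+// type is assignable to c.typ.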
+func (c *typeFilterConstraint) solve(a *analysis, delta *nodeset) {
+ for _, x := range delta.AppendTo(a.deltaSpace) {
+ ifaceObj := nodeid(x)
+ tDyn, _, indirect := a.taggedValue(ifaceObj)
+ if indirect {
+ // TODO(adonovan): we'll need to implement this
+ // when we start creating indirect tagged objects.
+ panic("indirect tagged object")
+ }
+
+ if types.AssignableTo(tDyn, c.typ) {
+ if a.addLabel(c.dst, ifaceObj) {
+ a.addWork(c.dst)
+ }
+ }
+ }
+}
+
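+// solve implements dst = src.(T) where T is a concrete type: for each
+// tagged object newly added to pts(src) (delta) whose dynamic type is
+// assignable to (or, if c.exact, identical to) c.typ, the object's payload
+// is copied to dst.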
+func (c *untagConstraint) solve(a *analysis, delta *nodeset) {
+ predicate := types.AssignableTo
+ if c.exact {
+ predicate = types.Identical
+ }
+ for _, x := range delta.AppendTo(a.deltaSpace) {
+ ifaceObj := nodeid(x)
+ tDyn, v, indirect := a.taggedValue(ifaceObj)
+ if indirect {
+ // TODO(adonovan): we'll need to implement this
+ // when we start creating indirect tagged objects.
+ panic("indirect tagged object")
+ }
+
+ if predicate(tDyn, c.typ) {
+ // Copy payload sans tag to dst.
+ //
+ // TODO(adonovan): opt: if tDyn is
+ // nonpointerlike we can skip this entire
+ // constraint, perhaps. We only care about
+ // pointers among the fields.
+ a.onlineCopyN(c.dst, v, a.sizeof(tDyn))
+ }
+ }
+}
+
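+// solve implements a dynamic method call through an interface: for each
+// tagged object newly added to the interface's points-to set (delta), it
+// looks up the concrete method for the dynamic type and, via the method's
+// shared contour, copies the receiver and arguments into its parameters
+// and the results back to the call site.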
+func (c *invokeConstraint) solve(a *analysis, delta *nodeset) {
+ for _, x := range delta.AppendTo(a.deltaSpace) {
+ ifaceObj := nodeid(x)
+ tDyn, v, indirect := a.taggedValue(ifaceObj)
+ if indirect {
+ // TODO(adonovan): we may need to implement this if
+ // we ever apply invokeConstraints to reflect.Value PTSs,
+ // e.g. for (reflect.Value).Call.
+ panic("indirect tagged object")
+ }
+
+ // Look up the concrete method.
+ fn := a.prog.LookupMethod(tDyn, c.method.Pkg(), c.method.Name())
+ if fn == nil {
+ panic(fmt.Sprintf("n%d: no ssa.Function for %s", c.iface, c.method))
+ }
+ sig := fn.Signature
+
+ fnObj := a.globalobj[fn] // dynamic calls use shared contour
+ if fnObj == 0 {
+ // a.objectNode(fn) was not called during gen phase.
+ panic(fmt.Sprintf("a.globalobj[%s]==nil", fn))
+ }
+
+ // Make callsite's fn variable point to identity of
+ // concrete method. (There's no need to add it to
+ // worklist since it never has attached constraints.)
+ a.addLabel(c.params, fnObj)
+
+ // Extract value and connect to method's receiver.
+ // Copy payload to method's receiver param (arg0).
+ arg0 := a.funcParams(fnObj)
+ recvSize := a.sizeof(sig.Recv().Type())
+ a.onlineCopyN(arg0, v, recvSize)
+
+ src := c.params + 1 // skip past identity
+ dst := arg0 + nodeid(recvSize)
+
+ // Copy caller's argument block to method formal parameters.
+ paramsSize := a.sizeof(sig.Params())
+ a.onlineCopyN(dst, src, paramsSize)
+ src += nodeid(paramsSize)
+ dst += nodeid(paramsSize)
+
+ // Copy method results to caller's result block.
+ resultsSize := a.sizeof(sig.Results())
+ a.onlineCopyN(src, dst, resultsSize)
+ }
+}
+
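+// addr ("dst = &src") and copy ("dst = src") constraints are consumed
+// eagerly by processNewConstraints and are never attached to a node's
+// complex list, so their solve methods must be unreachable.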
+func (c *addrConstraint) solve(a *analysis, delta *nodeset) {
+ panic("addr is not a complex constraint")
+}
+
+func (c *copyConstraint) solve(a *analysis, delta *nodeset) {
+ panic("copy is not a complex constraint")
+}
diff --git a/llgo/third_party/go.tools/go/pointer/stdlib_test.go b/llgo/third_party/go.tools/go/pointer/stdlib_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..028b73343e45d93e6b7e648e48f5309c3188110d
--- /dev/null
+++ b/llgo/third_party/go.tools/go/pointer/stdlib_test.go
@@ -0,0 +1,108 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pointer
+
+// This file runs the pointer analysis on all packages and tests beneath
+// $GOROOT. It provides a "smoke test" that the analysis doesn't crash
+// on a large input, and a benchmark for performance measurement.
+//
+// Because it is relatively slow, the --stdlib flag must be enabled for
+// this test to run:
+// % go test -v golang.org/x/tools/go/pointer --stdlib
+
+import (
+ "flag"
+ "go/build"
+ "go/token"
+ "testing"
+ "time"
+
+ "llvm.org/llgo/third_party/go.tools/go/buildutil"
+ "llvm.org/llgo/third_party/go.tools/go/loader"
+ "llvm.org/llgo/third_party/go.tools/go/ssa"
+ "llvm.org/llgo/third_party/go.tools/go/ssa/ssautil"
+)
+
+var runStdlibTest = flag.Bool("stdlib", false, "Run the (slow) stdlib test")
+
+func TestStdlib(t *testing.T) {
+ if !*runStdlibTest {
+ t.Skip("skipping (slow) stdlib test (use --stdlib)")
+ }
+
+ // Load, parse and type-check the program.
+ ctxt := build.Default // copy
+ ctxt.GOPATH = "" // disable GOPATH
+ conf := loader.Config{
+ SourceImports: true,
+ Build: &ctxt,
+ }
+ if _, err := conf.FromArgs(buildutil.AllPackages(conf.Build), true); err != nil {
+ t.Errorf("FromArgs failed: %v", err)
+ return
+ }
+
+ iprog, err := conf.Load()
+ if err != nil {
+ t.Fatalf("Load failed: %v", err)
+ }
+
+ // Create SSA packages.
+ prog := ssa.Create(iprog, 0)
+ prog.BuildAll()
+
+ numPkgs := len(prog.AllPackages())
+ if want := 240; numPkgs < want {
+ t.Errorf("Loaded only %d packages, want at least %d", numPkgs, want)
+ }
+
+ // Determine the set of packages/tests to analyze.
+ var testPkgs []*ssa.Package
+ for _, info := range iprog.InitialPackages() {
+ testPkgs = append(testPkgs, prog.Package(info.Pkg))
+ }
+ testmain := prog.CreateTestMainPackage(testPkgs...)
+ if testmain == nil {
+ t.Fatal("analysis scope has no tests")
+ }
+
+ // Run the analysis.
+ config := &Config{
+ Reflection: false, // TODO(adonovan): fix remaining bug in rVCallConstraint, then enable.
+ BuildCallGraph: true,
+ Mains: []*ssa.Package{testmain},
+ }
+ // TODO(adonovan): add some query values (affects track bits).
+
+ t0 := time.Now()
+
+ result, err := Analyze(config)
+ if err != nil {
+ t.Fatal(err) // internal error in pointer analysis
+ }
+ _ = result // TODO(adonovan): measure something
+
+ t1 := time.Now()
+
+ // Dump some statistics.
+ allFuncs := ssautil.AllFunctions(prog)
+ var numInstrs int
+ for fn := range allFuncs {
+ for _, b := range fn.Blocks {
+ numInstrs += len(b.Instrs)
+ }
+ }
+
+ // determine line count
+ var lineCount int
+ prog.Fset.Iterate(func(f *token.File) bool {
+ lineCount += f.LineCount()
+ return true
+ })
+
+ t.Log("#Source lines: ", lineCount)
+ t.Log("#Instructions: ", numInstrs)
+ t.Log("Pointer analysis: ", t1.Sub(t0))
+}
diff --git a/llgo/third_party/go.tools/go/pointer/testdata/a_test.go b/llgo/third_party/go.tools/go/pointer/testdata/a_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..3baa9ac7ef42f7ca0c602e4e168b6835da9b3ae2
--- /dev/null
+++ b/llgo/third_party/go.tools/go/pointer/testdata/a_test.go
@@ -0,0 +1,42 @@
+// +build ignore
+
+package a
+
+// This test exercises the synthesis of testmain packages for tests.
+// The test framework doesn't directly let us perform negative
+// assertions (i.e. that TestingQuux isn't called, or that its
+// parameter's PTS is empty) so this test is rather roundabout.
+
+import "testing"
+
+func log(f func(*testing.T)) {
+ // The PTS of f is the set of called tests. TestingQuux is not present.
+ print(f) // @pointsto main.Test | main.TestFoo
+}
+
+func Test(t *testing.T) {
+ // Don't assert @pointsto(t) since its label contains a fragile line number.
+ log(Test)
+}
+
+func TestFoo(t *testing.T) {
+ // Don't assert @pointsto(t) since its label contains a fragile line number.
+ log(TestFoo)
+}
+
+func TestingQuux(t *testing.T) {
+ // We can't assert @pointsto(t) since this is dead code.
+ log(TestingQuux)
+}
+
+func BenchmarkFoo(b *testing.B) {
+}
+
+func ExampleBar() {
+}
+
+// Excludes TestingQuux.
+// @calls testing.tRunner -> main.Test
+// @calls testing.tRunner -> main.TestFoo
+// @calls testing.runExample -> main.ExampleBar
+// @calls (*testing.B).runN -> main.BenchmarkFoo
diff --git a/llgo/third_party/go.tools/go/pointer/testdata/another.go b/llgo/third_party/go.tools/go/pointer/testdata/another.go
new file mode 100644
index 0000000000000000000000000000000000000000..443c94d060c39b8c763ae3f8bd99cdf09e5aaaea
--- /dev/null
+++ b/llgo/third_party/go.tools/go/pointer/testdata/another.go
@@ -0,0 +1,34 @@
+// +build ignore
+
+package main
+
+var unknown bool
+
+type S string
+
+func incr(x int) int { return x + 1 }
+
+func main() {
+ var i interface{}
+ i = 1
+ if unknown {
+ i = S("foo")
+ }
+ if unknown {
+ i = (func(int, int))(nil) // NB type compares equal to that below.
+ }
+ // Look, the test harness can handle equal-but-not-String-equal
+ // types because we parse types and use a typemap.
+ if unknown {
+ i = (func(x int, y int))(nil)
+ }
+ if unknown {
+ i = incr
+ }
+ print(i) // @types int | S | func(int, int) | func(int) int
+
+ // NB, an interface may never directly alias any global
+ // labels, even though it may contain pointers that do.
+ print(i) // @pointsto makeinterface:func(x int) int | makeinterface:func(x int, y int) | makeinterface:func(int, int) | makeinterface:int | makeinterface:main.S
+ print(i.(func(int) int)) // @pointsto main.incr
+}
diff --git a/llgo/third_party/go.tools/go/pointer/testdata/arrayreflect.go b/llgo/third_party/go.tools/go/pointer/testdata/arrayreflect.go
new file mode 100644
index 0000000000000000000000000000000000000000..2b2367409c0cd4a25b698b34d8b99e1679badc5f
--- /dev/null
+++ b/llgo/third_party/go.tools/go/pointer/testdata/arrayreflect.go
@@ -0,0 +1,191 @@
+// +build ignore
+
+package main
+
+// Test of arrays & slices with reflection.
+
+import "reflect"
+
+var a, b int
+
+type S string
+
+func reflectValueSlice() {
+ // reflect.Value contains a slice.
+ slice := make([]*int, 10) // @line slice
+ slice[0] = &a
+ rvsl := reflect.ValueOf(slice).Slice(0, 0)
+ print(rvsl.Interface()) // @types []*int
+ print(rvsl.Interface().([]*int)) // @pointsto makeslice@slice:15
+ print(rvsl.Interface().([]*int)[42]) // @pointsto main.a
+
+ // reflect.Value contains an array (non-addressable).
+ array := [10]*int{&a} // @line array
+ rvarray := reflect.ValueOf(array).Slice(0, 0)
+ print(rvarray.Interface()) // @types
+ print(rvarray.Interface().([]*int)) // @pointsto
+ print(rvarray.Interface().([]*int)[42]) // @pointsto
+
+ // reflect.Value contains a pointer-to-array
+ rvparray := reflect.ValueOf(&array).Slice(0, 0)
+ print(rvparray.Interface()) // @types []*int
+ print(rvparray.Interface().([]*int)) // @pointsto array@array:2
+ print(rvparray.Interface().([]*int)[42]) // @pointsto main.a
+
+ // reflect.Value contains a string.
+ rvstring := reflect.ValueOf("hi").Slice(0, 0)
+ print(rvstring.Interface()) // @types string
+
+ // reflect.Value contains a (named) string type.
+ rvS := reflect.ValueOf(S("hi")).Slice(0, 0)
+ print(rvS.Interface()) // @types S
+
+ // reflect.Value contains a non-array pointer.
+ rvptr := reflect.ValueOf(new(int)).Slice(0, 0)
+ print(rvptr.Interface()) // @types
+
+ // reflect.Value contains a non-string basic type.
+ rvint := reflect.ValueOf(3).Slice(0, 0)
+ print(rvint.Interface()) // @types
+}
+
+func reflectValueBytes() {
+ sl1 := make([]byte, 0) // @line ar5sl1
+ sl2 := make([]byte, 0) // @line ar5sl2
+
+ rvsl1 := reflect.ValueOf(sl1)
+ print(rvsl1.Interface()) // @types []byte
+ print(rvsl1.Interface().([]byte)) // @pointsto makeslice@ar5sl1:13
+ print(rvsl1.Bytes()) // @pointsto makeslice@ar5sl1:13
+
+ rvsl2 := reflect.ValueOf(123)
+ rvsl2.SetBytes(sl2)
+ print(rvsl2.Interface()) // @types int
+ print(rvsl2.Interface().([]byte)) // @pointsto
+ print(rvsl2.Bytes()) // @pointsto
+
+ rvsl3 := reflect.ValueOf([]byte(nil))
+ rvsl3.SetBytes(sl2)
+ print(rvsl3.Interface()) // @types []byte
+ print(rvsl3.Interface().([]byte)) // @pointsto makeslice@ar5sl2:13
+ print(rvsl3.Bytes()) // @pointsto makeslice@ar5sl2:13
+}
+
+func reflectValueIndex() {
+ slice := []*int{&a} // @line ar6slice
+ rv1 := reflect.ValueOf(slice)
+ print(rv1.Index(42).Interface()) // @types *int
+ print(rv1.Index(42).Interface().(*int)) // @pointsto main.a
+
+ array := [10]*int{&a}
+ rv2 := reflect.ValueOf(array)
+ print(rv2.Index(42).Interface()) // @types *int
+ print(rv2.Index(42).Interface().(*int)) // @pointsto main.a
+
+ rv3 := reflect.ValueOf("string")
+ print(rv3.Index(42).Interface()) // @types rune
+
+ rv4 := reflect.ValueOf(&array)
+ print(rv4.Index(42).Interface()) // @types
+
+ rv5 := reflect.ValueOf(3)
+ print(rv5.Index(42).Interface()) // @types
+}
+
+func reflectValueElem() {
+ // Interface.
+ var iface interface{} = &a
+ rv1 := reflect.ValueOf(&iface).Elem()
+ print(rv1.Interface()) // @types *int
+ print(rv1.Interface().(*int)) // @pointsto main.a
+ print(rv1.Elem().Interface()) // @types *int
+ print(rv1.Elem().Interface().(*int)) // @pointsto main.a
+
+ print(reflect.ValueOf(new(interface{})).Elem().Elem()) // @types
+
+ // Pointer.
+ ptr := &a
+ rv2 := reflect.ValueOf(&ptr)
+ print(rv2.Elem().Interface()) // @types *int
+ print(rv2.Elem().Interface().(*int)) // @pointsto main.a
+
+ // No other type works with (rV).Elem, not even those that
+ // work with (rT).Elem: slice, array, map, chan.
+
+ rv3 := reflect.ValueOf([]*int{&a})
+ print(rv3.Elem().Interface()) // @types
+
+ rv4 := reflect.ValueOf([10]*int{&a})
+ print(rv4.Elem().Interface()) // @types
+
+ rv5 := reflect.ValueOf(map[*int]*int{&a: &b})
+ print(rv5.Elem().Interface()) // @types
+
+ ch := make(chan *int)
+ ch <- &a
+ rv6 := reflect.ValueOf(ch)
+ print(rv6.Elem().Interface()) // @types
+
+ rv7 := reflect.ValueOf(3)
+ print(rv7.Elem().Interface()) // @types
+}
+
+func reflectTypeElem() {
+ rt1 := reflect.TypeOf(make([]*int, 0))
+ print(reflect.Zero(rt1.Elem())) // @types *int
+
+ rt2 := reflect.TypeOf([10]*int{})
+ print(reflect.Zero(rt2.Elem())) // @types *int
+
+ rt3 := reflect.TypeOf(map[*int]*int{})
+ print(reflect.Zero(rt3.Elem())) // @types *int
+
+ rt4 := reflect.TypeOf(make(chan *int))
+ print(reflect.Zero(rt4.Elem())) // @types *int
+
+ ptr := &a
+ rt5 := reflect.TypeOf(&ptr)
+ print(reflect.Zero(rt5.Elem())) // @types *int
+
+ rt6 := reflect.TypeOf(3)
+ print(reflect.Zero(rt6.Elem())) // @types
+}
+
+func reflectPtrTo() {
+ tInt := reflect.TypeOf(3)
+ tPtrInt := reflect.PtrTo(tInt)
+ print(reflect.Zero(tPtrInt)) // @types *int
+ tPtrPtrInt := reflect.PtrTo(tPtrInt)
+ print(reflect.Zero(tPtrPtrInt)) // @types **int
+}
+
+func reflectSliceOf() {
+ tInt := reflect.TypeOf(3)
+ tSliceInt := reflect.SliceOf(tInt)
+ print(reflect.Zero(tSliceInt)) // @types []int
+}
+
+type T struct{ x int }
+
+func reflectMakeSlice() {
+ rt := []reflect.Type{
+ reflect.TypeOf(3),
+ reflect.TypeOf([]int{}),
+ reflect.TypeOf([]T{}),
+ }[0]
+ sl := reflect.MakeSlice(rt, 0, 0)
+ print(sl) // @types []int | []T
+ print(sl) // @pointsto <alloc in reflect.MakeSlice> | <alloc in reflect.MakeSlice>
+ print(&sl.Interface().([]T)[0].x) // @pointsto <alloc in reflect.MakeSlice>[*].x
+}
+
+func main() {
+ reflectValueSlice()
+ reflectValueBytes()
+ reflectValueIndex()
+ reflectValueElem()
+ reflectTypeElem()
+ reflectPtrTo()
+ reflectSliceOf()
+ reflectMakeSlice()
+}
diff --git a/llgo/third_party/go.tools/go/pointer/testdata/arrays.go b/llgo/third_party/go.tools/go/pointer/testdata/arrays.go
new file mode 100644
index 0000000000000000000000000000000000000000..e57a15b4be72e70259725aef5359f194715aacbe
--- /dev/null
+++ b/llgo/third_party/go.tools/go/pointer/testdata/arrays.go
@@ -0,0 +1,97 @@
+// +build ignore
+
+package main
+
+var unknown bool // defeat dead-code elimination
+
+var a, b int
+
+func array1() {
+ sliceA := make([]*int, 10) // @line a1make
+ sliceA[0] = &a
+
+ var sliceB []*int
+ sliceB = append(sliceB, &b) // @line a1append
+
+ print(sliceA) // @pointsto makeslice@a1make:16
+ print(sliceA[0]) // @pointsto main.a
+
+ print(sliceB) // @pointsto append@a1append:17
+ print(sliceB[100]) // @pointsto main.b
+}
+
+func array2() {
+ sliceA := make([]*int, 10) // @line a2make
+ sliceA[0] = &a
+
+ sliceB := sliceA[:]
+
+ print(sliceA) // @pointsto makeslice@a2make:16
+ print(sliceA[0]) // @pointsto main.a
+
+ print(sliceB) // @pointsto makeslice@a2make:16
+ print(sliceB[0]) // @pointsto main.a
+}
+
+func array3() {
+ a := []interface{}{"", 1}
+ b := []interface{}{true, func() {}}
+ print(a[0]) // @types string | int
+ print(b[0]) // @types bool | func()
+}
+
+// Test of append, copy, slice.
+func array4() {
+ var s2 struct { // @line a4L0
+ a [3]int
+ b struct{ c, d int }
+ }
+ var sl1 = make([]*int, 10) // @line a4make
+ var someint int // @line a4L1
+ sl1[1] = &someint
+ sl2 := append(sl1, &s2.a[1]) // @line a4append1
+ print(sl1) // @pointsto makeslice@a4make:16
+ print(sl2) // @pointsto append@a4append1:15 | makeslice@a4make:16
+ print(sl1[0]) // @pointsto someint@a4L1:6 | s2.a[*]@a4L0:6
+ print(sl2[0]) // @pointsto someint@a4L1:6 | s2.a[*]@a4L0:6
+
+ // In z=append(x,y) we should observe flow from y[*] to x[*].
+ var sl3 = make([]*int, 10) // @line a4L2
+ _ = append(sl3, &s2.a[1])
+ print(sl3) // @pointsto makeslice@a4L2:16
+ print(sl3[0]) // @pointsto s2.a[*]@a4L0:6
+
+ var sl4 = []*int{&a} // @line a4L3
+ sl4a := append(sl4) // @line a4L4
+ print(sl4a) // @pointsto slicelit@a4L3:18 | append@a4L4:16
+ print(&sl4a[0]) // @pointsto slicelit[*]@a4L3:18 | append[*]@a4L4:16
+ print(sl4a[0]) // @pointsto main.a
+
+ var sl5 = []*int{&b} // @line a4L5
+ copy(sl5, sl4)
+ print(sl5) // @pointsto slicelit@a4L5:18
+ print(&sl5[0]) // @pointsto slicelit[*]@a4L5:18
+ print(sl5[0]) // @pointsto main.b | main.a
+
+ var sl6 = sl5[:0]
+ print(sl6) // @pointsto slicelit@a4L5:18
+ print(&sl6[0]) // @pointsto slicelit[*]@a4L5:18
+ print(sl6[0]) // @pointsto main.b | main.a
+}
+
+func array5() {
+ var arr [2]*int
+ arr[0] = &a
+ arr[1] = &b
+
+ var n int
+ print(arr[n]) // @pointsto main.a | main.b
+}
+
+func main() {
+ array1()
+ array2()
+ array3()
+ array4()
+ array5()
+}
diff --git a/llgo/third_party/go.tools/go/pointer/testdata/channels.go b/llgo/third_party/go.tools/go/pointer/testdata/channels.go
new file mode 100644
index 0000000000000000000000000000000000000000..76eb5f8c10b10647402a4580bf16aa563bbb5084
--- /dev/null
+++ b/llgo/third_party/go.tools/go/pointer/testdata/channels.go
@@ -0,0 +1,118 @@
+// +build ignore
+
+package main
+
+func incr(x int) int { return x + 1 }
+
+func decr(x int) int { return x - 1 }
+
+var unknown bool // defeat dead-code elimination
+
+func chan1() {
+ chA := make(chan func(int) int, 0) // @line c1makeA
+ chB := make(chan func(int) int, 0) // @line c1makeB
+ chA <- incr
+ chB <- decr
+ chB <- func(int) int { return 1 }
+
+ print(chA) // @pointsto makechan@c1makeA:13
+ print(<-chA) // @pointsto main.incr
+
+ print(chB) // @pointsto makechan@c1makeB:13
+ print(<-chB) // @pointsto main.decr | main.chan1$1
+}
+
+func chan2() {
+ chA := make(chan func(int) int, 0) // @line c2makeA
+ chB := make(chan func(int) int, 0) // @line c2makeB
+ chA <- incr
+ chB <- decr
+ chB <- func(int) int { return 1 }
+
+ // Channels flow together.
+ // Labelsets remain distinct but elements are merged.
+ chAB := chA
+ if unknown {
+ chAB = chB
+ }
+
+ print(chA) // @pointsto makechan@c2makeA:13
+ print(<-chA) // @pointsto main.incr
+
+ print(chB) // @pointsto makechan@c2makeB:13
+ print(<-chB) // @pointsto main.decr | main.chan2$1
+
+ print(chAB) // @pointsto makechan@c2makeA:13 | makechan@c2makeB:13
+ print(<-chAB) // @pointsto main.incr | main.decr | main.chan2$1
+
+ (<-chA)(3)
+}
+
+// @calls main.chan2 -> main.incr
+
+func chan3() {
+ chA := make(chan func(int) int, 0) // @line c3makeA
+ chB := make(chan func(int) int, 0) // @line c3makeB
+ chA <- incr
+ chB <- decr
+ chB <- func(int) int { return 1 }
+ print(chA) // @pointsto makechan@c3makeA:13
+ print(<-chA) // @pointsto main.incr
+ print(chB) // @pointsto makechan@c3makeB:13
+ print(<-chB) // @pointsto main.decr | main.chan3$1
+
+ (<-chA)(3)
+}
+
+// @calls main.chan3 -> main.incr
+
+func chan4() {
+ chA := make(chan func(int) int, 0) // @line c4makeA
+ chB := make(chan func(int) int, 0) // @line c4makeB
+
+ select {
+ case chA <- incr:
+ case chB <- decr:
+ case a := <-chA:
+ print(a) // @pointsto main.incr
+ case b := <-chB:
+ print(b) // @pointsto main.decr
+ default:
+ print(chA) // @pointsto makechan@c4makeA:13
+ print(chB) // @pointsto makechan@c4makeB:13
+ }
+
+ for k := range chA {
+ print(k) // @pointsto main.incr
+ }
+ // Exercise constraint generation (regtest for a crash).
+ for _ = range chA {
+ }
+}
+
+// Multi-word channel value in select with multiple receive cases.
+// (Regtest for a crash.)
+func chan5() {
+ type T struct {
+ x *int
+ y interface{}
+ }
+ ch := make(chan T)
+ ch <- T{new(int), incr} // @line ch5new
+ select {
+ case a := <-ch:
+ print(a.x) // @pointsto new@ch5new:13
+ print(a.y) // @types func(x int) int
+ case b := <-ch:
+ print(b.x) // @pointsto new@ch5new:13
+ print(b.y) // @types func(x int) int
+ }
+}
+
+func main() {
+ chan1()
+ chan2()
+ chan3()
+ chan4()
+ chan5()
+}
diff --git a/llgo/third_party/go.tools/go/pointer/testdata/chanreflect.go b/llgo/third_party/go.tools/go/pointer/testdata/chanreflect.go
new file mode 100644
index 0000000000000000000000000000000000000000..7d22efeb6cde547949200b25732979df3cc075d7
--- /dev/null
+++ b/llgo/third_party/go.tools/go/pointer/testdata/chanreflect.go
@@ -0,0 +1,85 @@
+// +build ignore
+
+package main
+
+import "reflect"
+
+// Test of channels with reflection.
+
+var a, b int
+
+func chanreflect1() {
+ ch := make(chan *int, 0) // @line cr1make
+ crv := reflect.ValueOf(ch)
+ crv.Send(reflect.ValueOf(&a))
+ print(crv.Interface()) // @types chan *int
+ print(crv.Interface().(chan *int)) // @pointsto makechan@cr1make:12
+ print(<-ch) // @pointsto main.a
+}
+
+func chanreflect1i() {
+ // Exercises reflect.Value conversions to/from interfaces:
+ // a different code path than for concrete types.
+ ch := make(chan interface{}, 0)
+ reflect.ValueOf(ch).Send(reflect.ValueOf(&a))
+ v := <-ch
+ print(v) // @types *int
+ print(v.(*int)) // @pointsto main.a
+}
+
+func chanreflect2() {
+ ch := make(chan *int, 0)
+ ch <- &b
+ crv := reflect.ValueOf(ch)
+ r, _ := crv.Recv()
+ print(r.Interface()) // @types *int
+ print(r.Interface().(*int)) // @pointsto main.b
+}
+
+func chanOfRecv() {
+ // MakeChan(<-chan) is a no-op.
+ t := reflect.ChanOf(reflect.RecvDir, reflect.TypeOf(&a))
+ print(reflect.Zero(t).Interface()) // @types <-chan *int
+ print(reflect.MakeChan(t, 0).Interface().(<-chan *int)) // @pointsto
+ print(reflect.MakeChan(t, 0).Interface().(chan *int)) // @pointsto
+}
+
+func chanOfSend() {
+ // MakeChan(chan<-) is a no-op.
+ t := reflect.ChanOf(reflect.SendDir, reflect.TypeOf(&a))
+ print(reflect.Zero(t).Interface()) // @types chan<- *int
+ print(reflect.MakeChan(t, 0).Interface().(chan<- *int)) // @pointsto
+ print(reflect.MakeChan(t, 0).Interface().(chan *int)) // @pointsto
+}
+
+func chanOfBoth() {
+ t := reflect.ChanOf(reflect.BothDir, reflect.TypeOf(&a))
+ print(reflect.Zero(t).Interface()) // @types chan *int
+ ch := reflect.MakeChan(t, 0)
+ print(ch.Interface().(chan *int)) // @pointsto
+ ch.Send(reflect.ValueOf(&b))
+ ch.Interface().(chan *int) <- &a
+ r, _ := ch.Recv()
+ print(r.Interface().(*int)) // @pointsto main.a | main.b
+ print(<-ch.Interface().(chan *int)) // @pointsto main.a | main.b
+}
+
+var unknownDir reflect.ChanDir // not a constant
+
+func chanOfUnknown() {
+ // Unknown channel direction: assume all three.
+ // MakeChan only works on the bi-di channel type.
+ t := reflect.ChanOf(unknownDir, reflect.TypeOf(&a))
+ print(reflect.Zero(t).Interface()) // @types <-chan *int | chan<- *int | chan *int
+ print(reflect.MakeChan(t, 0).Interface()) // @types chan *int
+}
+
+func main() {
+ chanreflect1()
+ chanreflect1i()
+ chanreflect2()
+ chanOfRecv()
+ chanOfSend()
+ chanOfBoth()
+ chanOfUnknown()
+}
diff --git a/llgo/third_party/go.tools/go/pointer/testdata/chanreflect1.go b/llgo/third_party/go.tools/go/pointer/testdata/chanreflect1.go
new file mode 100644
index 0000000000000000000000000000000000000000..c5e258743331264084533ec5459fd689acdb8e71
--- /dev/null
+++ b/llgo/third_party/go.tools/go/pointer/testdata/chanreflect1.go
@@ -0,0 +1,35 @@
+// +build ignore
+
+package main
+
+import "reflect"
+
+//
+// This test is very sensitive to line-number perturbations!
+
+// Test of channels with reflection.
+
+var a, b int
+
+func chanreflect1() {
+ ch := make(chan *int, 0)
+ crv := reflect.ValueOf(ch)
+ crv.Send(reflect.ValueOf(&a))
+ print(crv.Interface()) // @types chan *int
+ print(crv.Interface().(chan *int)) // @pointsto makechan@testdata/chanreflect.go:15:12
+ print(<-ch) // @pointsto main.a
+}
+
+func chanreflect2() {
+ ch := make(chan *int, 0)
+ ch <- &b
+ crv := reflect.ValueOf(ch)
+ r, _ := crv.Recv()
+ print(r.Interface()) // @types *int
+ print(r.Interface().(*int)) // @pointsto main.b
+}
+
+func main() {
+ chanreflect1()
+ chanreflect2()
+}
diff --git a/llgo/third_party/go.tools/go/pointer/testdata/context.go b/llgo/third_party/go.tools/go/pointer/testdata/context.go
new file mode 100644
index 0000000000000000000000000000000000000000..ed616e7ecaee59c9bb1551e0a62db69a02ecee77
--- /dev/null
+++ b/llgo/third_party/go.tools/go/pointer/testdata/context.go
@@ -0,0 +1,48 @@
+// +build ignore
+
+package main
+
+// Test of context-sensitive treatment of certain function calls,
+// e.g. static calls to simple accessor methods.
+
+var a, b int
+
+type T struct{ x *int }
+
+func (t *T) SetX(x *int) { t.x = x }
+func (t *T) GetX() *int { return t.x }
+
+func context1() {
+ var t1, t2 T
+ t1.SetX(&a)
+ t2.SetX(&b)
+ print(t1.GetX()) // @pointsto main.a
+ print(t2.GetX()) // @pointsto main.b
+}
+
+func context2() {
+ id := func(x *int) *int {
+ print(x) // @pointsto main.a | main.b
+ return x
+ }
+ print(id(&a)) // @pointsto main.a
+ print(id(&b)) // @pointsto main.b
+
+ // Same again, but anon func has free vars.
+ var c int // @line context2c
+ id2 := func(x *int) (*int, *int) {
+ print(x) // @pointsto main.a | main.b
+ return x, &c
+ }
+ p, q := id2(&a)
+ print(p) // @pointsto main.a
+ print(q) // @pointsto c@context2c:6
+ r, s := id2(&b)
+ print(r) // @pointsto main.b
+ print(s) // @pointsto c@context2c:6
+}
+
+func main() {
+ context1()
+ context2()
+}
diff --git a/llgo/third_party/go.tools/go/pointer/testdata/conv.go b/llgo/third_party/go.tools/go/pointer/testdata/conv.go
new file mode 100644
index 0000000000000000000000000000000000000000..692f0ceba6191a0159ef858d403857644c41f351
--- /dev/null
+++ b/llgo/third_party/go.tools/go/pointer/testdata/conv.go
@@ -0,0 +1,63 @@
+// +build ignore
+
+package main
+
+import "unsafe"
+
+var a int
+
+func conv1() {
+ // Conversions of channel direction.
+ ch := make(chan int) // @line c1make
+ print((<-chan int)(ch)) // @pointsto makechan@c1make:12
+ print((chan<- int)(ch)) // @pointsto makechan@c1make:12
+}
+
+func conv2() {
+ // string -> []byte/[]rune conversion
+ s := "foo"
+ ba := []byte(s) // @line c2ba
+ ra := []rune(s) // @line c2ra
+ print(ba) // @pointsto convert@c2ba:14
+ print(ra) // @pointsto convert@c2ra:14
+}
+
+func conv3() {
+ // Conversion of same underlying types.
+ type PI *int
+ pi := PI(&a)
+ print(pi) // @pointsto main.a
+
+ pint := (*int)(pi)
+ print(pint) // @pointsto main.a
+
+ // Conversions between pointers to identical base types.
+ var y *PI = &pi
+ var x **int = (**int)(y)
+ print(*x) // @pointsto main.a
+ print(*y) // @pointsto main.a
+ y = (*PI)(x)
+ print(*y) // @pointsto main.a
+}
+
+func conv4() {
+ // Handling of unsafe.Pointer conversion is unsound:
+ // we lose the alias to main.a and get something like new(int) instead.
+ p := (*int)(unsafe.Pointer(&a)) // @line c2p
+ print(p) // @pointsto convert@c2p:13
+}
+
+// Regression test for b/8231.
+func conv5() {
+ type P unsafe.Pointer
+ var i *struct{}
+ _ = P(i)
+}
+
+func main() {
+ conv1()
+ conv2()
+ conv3()
+ conv4()
+ conv5()
+}
diff --git a/llgo/third_party/go.tools/go/pointer/testdata/finalizer.go b/llgo/third_party/go.tools/go/pointer/testdata/finalizer.go
new file mode 100644
index 0000000000000000000000000000000000000000..97f25c9047427f952c549f59b503afcd8c74d6b6
--- /dev/null
+++ b/llgo/third_party/go.tools/go/pointer/testdata/finalizer.go
@@ -0,0 +1,89 @@
+package main
+
+import "runtime"
+
+func final1a(x *int) int {
+ print(x) // @pointsto new@newint:10
+ return *x
+}
+
+func final1b(x *bool) {
+ print(x) // @pointsto
+}
+
+func runtimeSetFinalizer1() {
+ x := new(int) // @line newint
+ runtime.SetFinalizer(x, final1a) // ok: final1a's result is ignored
+ runtime.SetFinalizer(x, final1b) // param type mismatch: no effect
+}
+
+// @calls main.runtimeSetFinalizer1 -> main.final1a
+// @calls main.runtimeSetFinalizer1 -> main.final1b
+
+func final2a(x *bool) {
+ print(x) // @pointsto new@newbool1:10 | new@newbool2:10
+}
+
+func final2b(x *bool) {
+ print(x) // @pointsto new@newbool1:10 | new@newbool2:10
+}
+
+func runtimeSetFinalizer2() {
+ x := new(bool) // @line newbool1
+ f := final2a
+ if unknown {
+ x = new(bool) // @line newbool2
+ f = final2b
+ }
+ runtime.SetFinalizer(x, f)
+}
+
+// @calls main.runtimeSetFinalizer2 -> main.final2a
+// @calls main.runtimeSetFinalizer2 -> main.final2b
+
+type T int
+
+func (t *T) finalize() {
+ print(t) // @pointsto new@final3:10
+}
+
+func runtimeSetFinalizer3() {
+ x := new(T) // @line final3
+ runtime.SetFinalizer(x, (*T).finalize)
+}
+
+// @calls main.runtimeSetFinalizer3 -> (*main.T).finalize$thunk
+
+// I hope I never live to see this code in the wild.
+var setFinalizer = runtime.SetFinalizer
+
+func final4(x *int) {
+ print(x) // @pointsto new@finalIndirect:10
+}
+
+func runtimeSetFinalizerIndirect() {
+ // In an indirect call, the shared contour for SetFinalizer is
+ // used, i.e. the call is not inlined and appears in the call graph.
+ x := new(int) // @line finalIndirect
+ setFinalizer(x, final4)
+}
+
+// Exercise the elimination of SetFinalizer
+// constraints with non-pointer operands.
+func runtimeSetFinalizerNonpointer() {
+ runtime.SetFinalizer(nil, (*T).finalize) // x is a non-pointer
+ runtime.SetFinalizer((*T).finalize, nil) // f is a non-pointer
+}
+
+// @calls main.runtimeSetFinalizerIndirect -> runtime.SetFinalizer
+// @calls runtime.SetFinalizer -> main.final4
+
+func main() {
+ runtimeSetFinalizer1()
+ runtimeSetFinalizer2()
+ runtimeSetFinalizer3()
+ runtimeSetFinalizerIndirect()
+ runtimeSetFinalizerNonpointer()
+}
+
+var unknown bool // defeat dead-code elimination
diff --git a/llgo/third_party/go.tools/go/pointer/testdata/flow.go b/llgo/third_party/go.tools/go/pointer/testdata/flow.go
new file mode 100644
index 0000000000000000000000000000000000000000..6fb599e8d89f479494be15e10c81ff236da0bd3a
--- /dev/null
+++ b/llgo/third_party/go.tools/go/pointer/testdata/flow.go
@@ -0,0 +1,63 @@
+// +build ignore
+
+package main
+
+// Demonstration of directionality of flow edges.
+
+func f1() {}
+func f2() {}
+
+var somepred bool
+
+// Tracking functions.
+func flow1() {
+ s := f1
+ p := f2
+ q := p
+ r := q
+ if somepred {
+ r = s
+ }
+ print(s) // @pointsto main.f1
+ print(p) // @pointsto main.f2
+ print(q) // @pointsto main.f2
+ print(r) // @pointsto main.f1 | main.f2
+}
+
+// Tracking concrete types in interfaces.
+func flow2() {
+ var s interface{} = 1
+ var p interface{} = "foo"
+ q := p
+ r := q
+ if somepred {
+ r = s
+ }
+ print(s) // @types int
+ print(p) // @types string
+ print(q) // @types string
+ print(r) // @types int | string
+}
+
+var g1, g2 int
+
+// Tracking addresses of globals.
+func flow3() {
+ s := &g1
+ p := &g2
+ q := p
+ r := q
+ if somepred {
+ r = s
+ }
+ print(s) // @pointsto main.g1
+ print(p) // @pointsto main.g2
+ print(q) // @pointsto main.g2
+ print(r) // @pointsto main.g2 | main.g1
+}
+
+func main() {
+ flow1()
+ flow2()
+ flow3()
+}
diff --git a/llgo/third_party/go.tools/go/pointer/testdata/fmtexcerpt.go b/llgo/third_party/go.tools/go/pointer/testdata/fmtexcerpt.go
new file mode 100644
index 0000000000000000000000000000000000000000..ee2a0e76c7f104d09819921005d384b0129f15ba
--- /dev/null
+++ b/llgo/third_party/go.tools/go/pointer/testdata/fmtexcerpt.go
@@ -0,0 +1,42 @@
+// +build ignore
+
+// This is a slice of the fmt package.
+
+package main
+
+type pp struct {
+ field interface{}
+}
+
+func newPrinter() *pp {
+ return new(pp)
+}
+
+func Fprintln(a ...interface{}) {
+ p := newPrinter()
+ p.doPrint(a, true, true)
+}
+
+func Println(a ...interface{}) {
+ Fprintln(a...)
+}
+
+func (p *pp) doPrint(a []interface{}, addspace, addnewline bool) {
+ print(a[0]) // @types S | string
+ stringer := a[0].(interface {
+ String() string
+ })
+
+ stringer.String()
+ print(stringer) // @types S
+}
+
+type S int
+
+func (S) String() string { return "" }
+
+func main() {
+ Println("Hello, World!", S(0))
+}
+
+// @calls (*main.pp).doPrint -> (main.S).String
diff --git a/llgo/third_party/go.tools/go/pointer/testdata/func.go b/llgo/third_party/go.tools/go/pointer/testdata/func.go
new file mode 100644
index 0000000000000000000000000000000000000000..2155f8ef71544b813ffdf13bf16771e45e104d90
--- /dev/null
+++ b/llgo/third_party/go.tools/go/pointer/testdata/func.go
@@ -0,0 +1,205 @@
+// +build ignore
+
+package main
+
+var a, b, c int
+
+var unknown bool // defeat dead-code elimination
+
+func func1() {
+ var h int // @line f1h
+ f := func(x *int) *int {
+ if unknown {
+ return &b
+ }
+ return x
+ }
+
+ // FV(g) = {f, h}
+ g := func(x *int) *int {
+ if unknown {
+ return &h
+ }
+ return f(x)
+ }
+
+ print(g(&a)) // @pointsto main.a | main.b | h@f1h:6
+ print(f(&a)) // @pointsto main.a | main.b
+ print(&a) // @pointsto main.a
+}
+
+// @calls main.func1 -> main.func1$2
+// @calls main.func1 -> main.func1$1
+// @calls main.func1$2 -> main.func1$1
+
+func func2() {
+ var x, y *int
+ defer func() {
+ x = &a
+ }()
+ go func() {
+ y = &b
+ }()
+ print(x) // @pointsto main.a
+ print(y) // @pointsto main.b
+}
+
+func func3() {
+ x, y := func() (x, y *int) {
+ x = &a
+ y = &b
+ if unknown {
+ return nil, &c
+ }
+ return
+ }()
+ print(x) // @pointsto main.a
+ print(y) // @pointsto main.b | main.c
+}
+
+func swap(x, y *int) (*int, *int) { // @line swap
+ print(&x) // @pointsto x@swap:11
+ print(x) // @pointsto makeslice[*]@func4make:11
+ print(&y) // @pointsto y@swap:14
+ print(y) // @pointsto j@f4j:5
+ return y, x
+}
+
+func func4() {
+ a := make([]int, 10) // @line func4make
+ i, j := 123, 456 // @line f4j
+ _ = i
+ p, q := swap(&a[3], &j)
+ print(p) // @pointsto j@f4j:5
+ print(q) // @pointsto makeslice[*]@func4make:11
+
+ f := &b
+ print(f) // @pointsto main.b
+}
+
+type T int
+
+func (t *T) f(x *int) *int {
+ print(t) // @pointsto main.a
+ print(x) // @pointsto main.c
+ return &b
+}
+
+func (t *T) g(x *int) *int {
+ print(t) // @pointsto main.a
+ print(x) // @pointsto main.b
+ return &c
+}
+
+func (t *T) h(x *int) *int {
+ print(t) // @pointsto main.a
+ print(x) // @pointsto main.b
+ return &c
+}
+
+var h func(*T, *int) *int
+
+func func5() {
+ // Static call of method.
+ t := (*T)(&a)
+ print(t.f(&c)) // @pointsto main.b
+
+ // Static call of method as function
+ print((*T).g(t, &b)) // @pointsto main.c
+
+ // Dynamic call (not invoke) of method.
+ h = (*T).h
+ print(h(t, &b)) // @pointsto main.c
+}
+
+// @calls main.func5 -> (*main.T).f
+// @calls main.func5 -> (*main.T).g$thunk
+// @calls main.func5 -> (*main.T).h$thunk
+
+func func6() {
+ A := &a
+ f := func() *int {
+ return A // (free variable)
+ }
+ print(f()) // @pointsto main.a
+}
+
+// @calls main.func6 -> main.func6$1
+
+type I interface {
+ f()
+}
+
+type D struct{}
+
+func (D) f() {}
+
+func func7() {
+ var i I = D{}
+ imethodClosure := i.f
+ imethodClosure()
+ // @calls main.func7 -> (main.I).f$bound
+ // @calls (main.I).f$bound -> (main.D).f
+
+ var d D
+ cmethodClosure := d.f
+ cmethodClosure()
+ // @calls main.func7 -> (main.D).f$bound
+ // @calls (main.D).f$bound -> (main.D).f
+
+ methodExpr := D.f
+ methodExpr(d)
+ // @calls main.func7 -> (main.D).f$thunk
+}
+
+func func8(x ...int) {
+ print(&x[0]) // @pointsto varargs[*]@varargs:15
+}
+
+type E struct {
+ x1, x2, x3, x4, x5 *int
+}
+
+func (e E) f() {}
+
+func func9() {
+ // Regression test for bug reported by Jon Valdes on golang-dev, Jun 19 2014.
+ // The receiver of a bound method closure may be of a multi-node type, E.
+ // valueNode was reserving only a single node for it, so the
+ // nodes used by the immediately following constraints
+ // (e.g. param 'i') would get clobbered.
+
+ var e E
+ e.x1 = &a
+ e.x2 = &a
+ e.x3 = &a
+ e.x4 = &a
+ e.x5 = &a
+
+ _ = e.f // form a closure---must reserve sizeof(E) nodes
+
+ func(i I) {
+ i.f() // must not crash the solver
+ }(new(D))
+
+ print(e.x1) // @pointsto main.a
+ print(e.x2) // @pointsto main.a
+ print(e.x3) // @pointsto main.a
+ print(e.x4) // @pointsto main.a
+ print(e.x5) // @pointsto main.a
+}
+
+func main() {
+ func1()
+ func2()
+ func3()
+ func4()
+ func5()
+ func6()
+ func7()
+ func8(1, 2, 3) // @line varargs
+ func9()
+}
+
+// @calls <root> -> main.main
+// @calls <root> -> main.init
diff --git a/llgo/third_party/go.tools/go/pointer/testdata/funcreflect.go b/llgo/third_party/go.tools/go/pointer/testdata/funcreflect.go
new file mode 100644
index 0000000000000000000000000000000000000000..a0a9a5faaa88a90b92d04deaac9fce79b59831eb
--- /dev/null
+++ b/llgo/third_party/go.tools/go/pointer/testdata/funcreflect.go
@@ -0,0 +1,130 @@
+// +build ignore
+
+package main
+
+import "reflect"
+
+var zero, a, b int
+var false2 bool
+
+func f(p *int, q hasF) *int {
+ print(p) // @pointsto main.a
+ print(q) // @types *T
+ print(q.(*T)) // @pointsto new@newT1:22
+ return &b
+}
+
+func g(p *bool) (*int, *bool, hasF) {
+ return &b, p, new(T) // @line newT2
+}
+
+func reflectValueCall() {
+ rvf := reflect.ValueOf(f)
+ res := rvf.Call([]reflect.Value{
+ // argument order is not significant:
+ reflect.ValueOf(new(T)), // @line newT1
+ reflect.ValueOf(&a),
+ })
+ print(res[0].Interface()) // @types *int
+ print(res[0].Interface().(*int)) // @pointsto main.b
+}
+
+// @calls main.reflectValueCall -> main.f
+
+func reflectValueCallIndirect() {
+ rvf := reflect.ValueOf(g)
+ call := rvf.Call // kids, don't try this at home
+
+ // Indirect call uses shared contour.
+ //
+ // Also notice that argument position doesn't matter, and args
+ // of inappropriate type (e.g. 'a') are ignored.
+ res := call([]reflect.Value{
+ reflect.ValueOf(&a),
+ reflect.ValueOf(&false2),
+ })
+ res0 := res[0].Interface()
+ print(res0) // @types *int | *bool | *T
+ print(res0.(*int)) // @pointsto main.b
+ print(res0.(*bool)) // @pointsto main.false2
+ print(res0.(hasF)) // @types *T
+ print(res0.(*T)) // @pointsto new@newT2:19
+}
+
+// @calls main.reflectValueCallIndirect -> (reflect.Value).Call$bound
+// @calls (reflect.Value).Call$bound -> main.g
+
+func reflectTypeInOut() {
+ var f func(float64, bool) (string, int)
+ print(reflect.Zero(reflect.TypeOf(f).In(0)).Interface()) // @types float64
+ print(reflect.Zero(reflect.TypeOf(f).In(1)).Interface()) // @types bool
+ print(reflect.Zero(reflect.TypeOf(f).In(-1)).Interface()) // @types float64 | bool
+ print(reflect.Zero(reflect.TypeOf(f).In(zero)).Interface()) // @types float64 | bool
+
+ print(reflect.Zero(reflect.TypeOf(f).Out(0)).Interface()) // @types string
+ print(reflect.Zero(reflect.TypeOf(f).Out(1)).Interface()) // @types int
+ print(reflect.Zero(reflect.TypeOf(f).Out(2)).Interface()) // @types
+
+ print(reflect.Zero(reflect.TypeOf(3).Out(0)).Interface()) // @types
+}
+
+type hasF interface {
+ F()
+}
+
+type T struct{}
+
+func (T) F() {}
+func (T) g(int) {}
+
+type U struct{}
+
+func (U) F(int) {}
+func (U) g(string) {}
+
+type I interface {
+ f()
+}
+
+var nonconst string
+
+func reflectTypeMethodByName() {
+ TU := reflect.TypeOf([]interface{}{T{}, U{}}[0])
+ print(reflect.Zero(TU)) // @types T | U
+
+ F, _ := TU.MethodByName("F")
+ print(reflect.Zero(F.Type)) // @types func(T) | func(U, int)
+ print(F.Func) // @pointsto (main.T).F | (main.U).F
+
+ g, _ := TU.MethodByName("g")
+ print(reflect.Zero(g.Type)) // @types func(T, int) | func(U, string)
+ print(g.Func) // @pointsto (main.T).g | (main.U).g
+
+ // Non-literal method names are treated less precisely.
+ U := reflect.TypeOf(U{})
+ X, _ := U.MethodByName(nonconst)
+ print(reflect.Zero(X.Type)) // @types func(U, int) | func(U, string)
+ print(X.Func) // @pointsto (main.U).F | (main.U).g
+
+ // Interface methods.
+ rThasF := reflect.TypeOf(new(hasF)).Elem()
+ print(reflect.Zero(rThasF)) // @types hasF
+ F2, _ := rThasF.MethodByName("F")
+ print(reflect.Zero(F2.Type)) // @types func()
+ print(F2.Func) // @pointsto
+
+}
+
+func reflectTypeMethod() {
+ m := reflect.TypeOf(T{}).Method(0)
+ print(reflect.Zero(m.Type)) // @types func(T) | func(T, int)
+ print(m.Func) // @pointsto (main.T).F | (main.T).g
+}
+
+func main() {
+ reflectValueCall()
+ reflectValueCallIndirect()
+ reflectTypeInOut()
+ reflectTypeMethodByName()
+ reflectTypeMethod()
+}
diff --git a/llgo/third_party/go.tools/go/pointer/testdata/hello.go b/llgo/third_party/go.tools/go/pointer/testdata/hello.go
new file mode 100644
index 0000000000000000000000000000000000000000..b81784b22a59236967eda07236e9a75d424c2419
--- /dev/null
+++ b/llgo/third_party/go.tools/go/pointer/testdata/hello.go
@@ -0,0 +1,27 @@
+// +build ignore
+
+package main
+
+import (
+ "fmt"
+ "os"
+)
+
+type S int
+
+var theS S
+
+func (s *S) String() string {
+ print(s) // @pointsto main.theS
+ return ""
+}
+
+func main() {
+ // os.Args is considered intrinsically allocated,
+ // but may also be set explicitly (e.g. on Windows), hence '...'.
+ print(os.Args) // @pointsto <command-line args> | ...
+ fmt.Println("Hello, World!", &theS)
+}
+
+// @calls main.main -> fmt.Println
+// @calls (*fmt.pp).handleMethods -> (*main.S).String
diff --git a/llgo/third_party/go.tools/go/pointer/testdata/interfaces.go b/llgo/third_party/go.tools/go/pointer/testdata/interfaces.go
new file mode 100644
index 0000000000000000000000000000000000000000..91c0fa9a9036fea7004cfae40c975e0aa4e2bc06
--- /dev/null
+++ b/llgo/third_party/go.tools/go/pointer/testdata/interfaces.go
@@ -0,0 +1,152 @@
+// +build ignore
+
+package main
+
+type I interface {
+ f()
+}
+
+type C int
+
+func (*C) f() {}
+
+type D struct{ ptr *int }
+
+func (D) f() {}
+
+type E struct{}
+
+func (*E) f() {}
+
+var a, b int
+
+var unknown bool // defeat dead-code elimination
+
+func interface1() {
+ var i interface{} = &a
+ var j interface{} = D{&b}
+ k := j
+ if unknown {
+ k = i
+ }
+
+ print(i) // @types *int
+ print(j) // @types D
+ print(k) // @types *int | D
+
+ print(i.(*int)) // @pointsto main.a
+ print(j.(*int)) // @pointsto
+ print(k.(*int)) // @pointsto main.a
+
+ print(i.(D).ptr) // @pointsto
+ print(j.(D).ptr) // @pointsto main.b
+ print(k.(D).ptr) // @pointsto main.b
+}
+
+func interface2() {
+ var i I = (*C)(&a)
+ var j I = D{&a}
+ k := j
+ if unknown {
+ k = i
+ }
+
+ print(i) // @types *C
+ print(j) // @types D
+ print(k) // @types *C | D
+ print(k) // @pointsto makeinterface:main.D | makeinterface:*main.C
+
+ k.f()
+ // @calls main.interface2 -> (*main.C).f
+ // @calls main.interface2 -> (main.D).f
+
+ print(i.(*C)) // @pointsto main.a
+ print(j.(D).ptr) // @pointsto main.a
+ print(k.(*C)) // @pointsto main.a
+
+ switch x := k.(type) {
+ case *C:
+ print(x) // @pointsto main.a
+ case D:
+ print(x.ptr) // @pointsto main.a
+ case *E:
+ print(x) // @pointsto
+ }
+}
+
+func interface3() {
+ // There should be no backflow of concrete types from the type-switch to x.
+ var x interface{} = 0
+ print(x) // @types int
+ switch x.(type) {
+ case int:
+ case string:
+ }
+}
+
+func interface4() {
+ var i interface{} = D{&a}
+ if unknown {
+ i = 123
+ }
+
+ print(i) // @types int | D
+
+ j := i.(I) // interface narrowing type-assertion
+ print(j) // @types D
+ print(j.(D).ptr) // @pointsto main.a
+
+ var l interface{} = j // interface widening assignment.
+ print(l) // @types D
+ print(l.(D).ptr) // @pointsto main.a
+
+ m := j.(interface{}) // interface widening type-assertion.
+ print(m) // @types D
+ print(m.(D).ptr) // @pointsto main.a
+}
+
+// Interface method calls and value flow:
+
+type J interface {
+ f(*int) *int
+}
+
+type P struct {
+ x int
+}
+
+func (p *P) f(pi *int) *int {
+ print(p) // @pointsto p@i5p:6
+ print(pi) // @pointsto i@i5i:6
+ return &p.x
+}
+
+func interface5() {
+ var p P // @line i5p
+ var j J = &p
+ var i int // @line i5i
+ print(j.f(&i)) // @pointsto p.x@i5p:6
+ print(&i) // @pointsto i@i5i:6
+
+ print(j) // @pointsto makeinterface:*main.P
+}
+
+// @calls main.interface5 -> (*main.P).f
+
+func interface6() {
+ f := I.f
+ print(f) // @pointsto (main.I).f$thunk
+ f(new(struct{ D }))
+}
+
+// @calls main.interface6 -> (main.I).f$thunk
+// @calls (main.I).f$thunk -> (*struct{main.D}).f
+
+func main() {
+ interface1()
+ interface2()
+ interface3()
+ interface4()
+ interface5()
+ interface6()
+}
diff --git a/llgo/third_party/go.tools/go/pointer/testdata/issue9002.go b/llgo/third_party/go.tools/go/pointer/testdata/issue9002.go
new file mode 100644
index 0000000000000000000000000000000000000000..b7c2c610903fcf5164050fdcfe1e5fb72a1acf08
--- /dev/null
+++ b/llgo/third_party/go.tools/go/pointer/testdata/issue9002.go
@@ -0,0 +1,17 @@
+package main
+
+func main() {
+ // Regression test for golang issue 9002.
+ //
+ // The two-result "value,ok" receive operation generated a
+ // too-wide constraint loading (value int, ok bool), not bool,
+ // from the channel.
+ //
+ // This bug manifested itself in an out-of-bounds array access
+ // when the makechan object was the highest-numbered node, as in
+ // this program.
+ //
+ // In more realistic programs it silently resulted in bogus
+ // constraints.
+ _, _ = <-make(chan int)
+}
diff --git a/llgo/third_party/go.tools/go/pointer/testdata/mapreflect.go b/llgo/third_party/go.tools/go/pointer/testdata/mapreflect.go
new file mode 100644
index 0000000000000000000000000000000000000000..bc5e7e6b7c37bdc790e229c6dbddf7292c6681b0
--- /dev/null
+++ b/llgo/third_party/go.tools/go/pointer/testdata/mapreflect.go
@@ -0,0 +1,117 @@
+// +build ignore
+
+package main
+
+// Test of maps with reflection.
+
+import "reflect"
+
+var a int
+var b bool
+
+func reflectMapKeysIndex() {
+ m := make(map[*int]*bool) // @line mr1make
+ m[&a] = &b
+
+ mrv := reflect.ValueOf(m)
+ print(mrv.Interface()) // @types map[*int]*bool
+ print(mrv.Interface().(map[*int]*bool)) // @pointsto makemap@mr1make:11
+ print(mrv) // @pointsto makeinterface:map[*int]*bool
+ print(mrv) // @types map[*int]*bool
+
+ keys := mrv.MapKeys()
+ print(keys) // @pointsto
+ for _, k := range keys {
+ print(k) // @pointsto
+ print(k) // @types *int
+ print(k.Interface()) // @types *int
+ print(k.Interface().(*int)) // @pointsto main.a
+
+ v := mrv.MapIndex(k)
+ print(v.Interface()) // @types *bool
+ print(v.Interface().(*bool)) // @pointsto main.b
+ }
+}
+
+func reflectSetMapIndex() {
+ m := make(map[*int]*bool)
+ mrv := reflect.ValueOf(m)
+ mrv.SetMapIndex(reflect.ValueOf(&a), reflect.ValueOf(&b))
+
+ print(m[nil]) // @pointsto main.b
+
+ for _, k := range mrv.MapKeys() {
+ print(k.Interface()) // @types *int
+ print(k.Interface().(*int)) // @pointsto main.a
+ }
+
+ tmap := reflect.TypeOf(m)
+ // types.EvalNode won't let us refer to non-exported types:
+ // print(tmap) // #@types *reflect.rtype
+ print(tmap) // @pointsto map[*int]*bool
+
+ zmap := reflect.Zero(tmap)
+ print(zmap) // @pointsto
+ print(zmap.Interface()) // @pointsto
+
+ print(tmap.Key()) // @pointsto *int
+ print(tmap.Elem()) // @pointsto *bool
+ print(reflect.Zero(tmap.Key())) // @pointsto
+ print(reflect.Zero(tmap.Key()).Interface()) // @pointsto
+ print(reflect.Zero(tmap.Key()).Interface()) // @types *int
+ print(reflect.Zero(tmap.Elem())) // @pointsto
+ print(reflect.Zero(tmap.Elem()).Interface()) // @pointsto
+ print(reflect.Zero(tmap.Elem()).Interface()) // @types *bool
+}
+
+func reflectSetMapIndexInterface() {
+ // Exercises reflect.Value conversions to/from interfaces:
+ // a different code path than for concrete types.
+ m := make(map[interface{}]interface{})
+ reflect.ValueOf(m).SetMapIndex(reflect.ValueOf(&a), reflect.ValueOf(&b))
+ for k, v := range m {
+ print(k) // @types *int
+ print(k.(*int)) // @pointsto main.a
+ print(v) // @types *bool
+ print(v.(*bool)) // @pointsto main.b
+ }
+}
+
+func reflectSetMapIndexAssignable() {
+ // SetMapIndex performs implicit assignability conversions.
+ type I *int
+ type J *int
+
+ str := reflect.ValueOf("")
+
+ // *int is assignable to I.
+ m1 := make(map[string]I)
+ reflect.ValueOf(m1).SetMapIndex(str, reflect.ValueOf(new(int))) // @line int
+ print(m1[""]) // @pointsto new@int:58
+
+ // I is assignable to I.
+ m2 := make(map[string]I)
+ reflect.ValueOf(m2).SetMapIndex(str, reflect.ValueOf(I(new(int)))) // @line I
+ print(m2[""]) // @pointsto new@I:60
+
+ // J is not assignable to I.
+ m3 := make(map[string]I)
+ reflect.ValueOf(m3).SetMapIndex(str, reflect.ValueOf(J(new(int))))
+ print(m3[""]) // @pointsto
+}
+
+func reflectMakeMap() {
+ t := reflect.TypeOf(map[*int]*bool(nil))
+ v := reflect.MakeMap(t)
+ print(v) // @types map[*int]*bool
+ print(v) // @pointsto
+}
+
+func main() {
+ reflectMapKeysIndex()
+ reflectSetMapIndex()
+ reflectSetMapIndexInterface()
+ reflectSetMapIndexAssignable()
+ reflectMakeMap()
+ // TODO(adonovan): reflect.MapOf(Type)
+}
diff --git a/llgo/third_party/go.tools/go/pointer/testdata/maps.go b/llgo/third_party/go.tools/go/pointer/testdata/maps.go
new file mode 100644
index 0000000000000000000000000000000000000000..6f3751d7fb31efb7ca42430d734a427627a8e794
--- /dev/null
+++ b/llgo/third_party/go.tools/go/pointer/testdata/maps.go
@@ -0,0 +1,51 @@
+// +build ignore
+
+package main
+
+// Test of maps.
+
+var a, b, c int
+
+func maps1() {
+ m1 := map[*int]*int{&a: &b} // @line m1m1
+ m2 := make(map[*int]*int) // @line m1m2
+ m2[&b] = &a
+
+ print(m1[nil]) // @pointsto main.b | main.c
+ print(m2[nil]) // @pointsto main.a
+
+ print(m1) // @pointsto makemap@m1m1:21
+ print(m2) // @pointsto makemap@m1m2:12
+
+ m1[&b] = &c
+
+ for k, v := range m1 {
+ print(k) // @pointsto main.a | main.b
+ print(v) // @pointsto main.b | main.c
+ }
+
+ for k, v := range m2 {
+ print(k) // @pointsto main.b
+ print(v) // @pointsto main.a
+ }
+
+ // Lookup doesn't create any aliases.
+ print(m2[&c]) // @pointsto main.a
+ if _, ok := m2[&a]; ok {
+ print(m2[&c]) // @pointsto main.a
+ }
+}
+
+func maps2() {
+ m1 := map[*int]*int{&a: &b}
+ m2 := map[*int]*int{&b: &c}
+ _ = []map[*int]*int{m1, m2} // (no spurious merging of m1, m2)
+
+ print(m1[nil]) // @pointsto main.b
+ print(m2[nil]) // @pointsto main.c
+}
+
+func main() {
+ maps1()
+ maps2()
+}
diff --git a/llgo/third_party/go.tools/go/pointer/testdata/panic.go b/llgo/third_party/go.tools/go/pointer/testdata/panic.go
new file mode 100644
index 0000000000000000000000000000000000000000..ee8a7668e07b40117a7ae6933172eea964ce1dd3
--- /dev/null
+++ b/llgo/third_party/go.tools/go/pointer/testdata/panic.go
@@ -0,0 +1,36 @@
+// +build ignore
+
+package main
+
+// Test of value flow from panic() to recover().
+// We model them as stores/loads of a global location.
+// We ignore concrete panic types originating from the runtime.
+
+var someval int
+
+type myPanic struct{}
+
+func f(int) {}
+
+func g() string { return "" }
+
+func deadcode() {
+ panic(123) // not reached
+}
+
+func main() {
+ switch someval {
+ case 0:
+ panic("oops")
+ case 1:
+ panic(myPanic{})
+ case 2:
+ panic(f)
+ case 3:
+ panic(g)
+ }
+ ex := recover()
+ print(ex) // @types myPanic | string | func(int) | func() string
+ print(ex.(func(int))) // @pointsto main.f
+ print(ex.(func() string)) // @pointsto main.g
+}
diff --git a/llgo/third_party/go.tools/go/pointer/testdata/recur.go b/llgo/third_party/go.tools/go/pointer/testdata/recur.go
new file mode 100644
index 0000000000000000000000000000000000000000..4c7229de94ac12bcdb71621ce7a0af330aed1a61
--- /dev/null
+++ b/llgo/third_party/go.tools/go/pointer/testdata/recur.go
@@ -0,0 +1,11 @@
+// +build ignore
+
+package main
+
+// Analysis abstraction of recursive calls is finite.
+
+func main() {
+ main()
+}
+
+// @calls main.main -> main.main
diff --git a/llgo/third_party/go.tools/go/pointer/testdata/reflect.go b/llgo/third_party/go.tools/go/pointer/testdata/reflect.go
new file mode 100644
index 0000000000000000000000000000000000000000..6b8d0f22eb8f970b3cd8e4efd46ba73aa223bdb7
--- /dev/null
+++ b/llgo/third_party/go.tools/go/pointer/testdata/reflect.go
@@ -0,0 +1,115 @@
+// +build ignore
+
+package main
+
+import "reflect"
+import "unsafe"
+
+var a, b int
+var unknown bool
+
+func reflectIndirect() {
+ ptr := &a
+ // Pointer:
+ print(reflect.Indirect(reflect.ValueOf(&ptr)).Interface().(*int)) // @pointsto main.a
+ // Non-pointer:
+ print(reflect.Indirect(reflect.ValueOf([]*int{ptr})).Interface().([]*int)[0]) // @pointsto main.a
+}
+
+func reflectNewAt() {
+ var x [8]byte
+ print(reflect.NewAt(reflect.TypeOf(3), unsafe.Pointer(&x)).Interface()) // @types *int
+}
+
+// @warning "unsound: main.reflectNewAt contains a reflect.NewAt.. call"
+
+func reflectTypeOf() {
+ t := reflect.TypeOf(3)
+ if unknown {
+ t = reflect.TypeOf("foo")
+ }
+ // TODO(adonovan): make types.Eval let us refer to unexported types.
+ print(t) // #@types *reflect.rtype
+ print(reflect.Zero(t).Interface()) // @types int | string
+ newint := reflect.New(t).Interface() // @line rtonew
+ print(newint) // @types *int | *string
+ print(newint.(*int)) // @pointsto
+ print(newint.(*string)) // @pointsto
+}
+
+func reflectTypeElem() {
+ print(reflect.Zero(reflect.TypeOf(&a).Elem()).Interface()) // @types int
+ print(reflect.Zero(reflect.TypeOf([]string{}).Elem()).Interface()) // @types string
+ print(reflect.Zero(reflect.TypeOf(make(chan bool)).Elem()).Interface()) // @types bool
+ print(reflect.Zero(reflect.TypeOf(make(map[string]float64)).Elem()).Interface()) // @types float64
+ print(reflect.Zero(reflect.TypeOf([3]complex64{}).Elem()).Interface()) // @types complex64
+ print(reflect.Zero(reflect.TypeOf(3).Elem()).Interface()) // @types
+ print(reflect.Zero(reflect.TypeOf(new(interface{})).Elem())) // @types interface{}
+ print(reflect.Zero(reflect.TypeOf(new(interface{})).Elem()).Interface()) // @types
+}
+
+// reflect.Values within reflect.Values.
+func metareflection() {
+ // "box" a *int twice, unbox it twice.
+ v0 := reflect.ValueOf(&a)
+ print(v0) // @types *int
+ v1 := reflect.ValueOf(v0) // box
+ print(v1) // @types reflect.Value
+ v2 := reflect.ValueOf(v1) // box
+ print(v2) // @types reflect.Value
+ v1a := v2.Interface().(reflect.Value) // unbox
+ print(v1a) // @types reflect.Value
+ v0a := v1a.Interface().(reflect.Value) // unbox
+ print(v0a) // @types *int
+ print(v0a.Interface().(*int)) // @pointsto main.a
+
+ // "box" an interface{} lvalue twice, unbox it twice.
+ var iface interface{} = 3
+ x0 := reflect.ValueOf(&iface).Elem()
+ print(x0) // @types interface{}
+ x1 := reflect.ValueOf(x0) // box
+ print(x1) // @types reflect.Value
+ x2 := reflect.ValueOf(x1) // box
+ print(x2) // @types reflect.Value
+ x1a := x2.Interface().(reflect.Value) // unbox
+ print(x1a) // @types reflect.Value
+ x0a := x1a.Interface().(reflect.Value) // unbox
+ print(x0a) // @types interface{}
+ print(x0a.Interface()) // @types int
+}
+
+type T struct{}
+
+// When the output of a type constructor flows to its input, we must
+// bound the set of types created to ensure termination of the algorithm.
+func typeCycle() {
+ t := reflect.TypeOf(0)
+ u := reflect.TypeOf("")
+ v := reflect.TypeOf(T{})
+ for unknown {
+ t = reflect.PtrTo(t)
+ t = reflect.SliceOf(t)
+
+ u = reflect.SliceOf(u)
+
+ if unknown {
+ v = reflect.ChanOf(reflect.BothDir, v)
+ } else {
+ v = reflect.PtrTo(v)
+ }
+ }
+
+ // Type height is bounded to about 4 map/slice/chan/pointer constructors.
+ print(reflect.Zero(t).Interface()) // @types int | []*int | []*[]*int
+ print(reflect.Zero(u).Interface()) // @types string | []string | [][]string | [][][]string | [][][][]string
+ print(reflect.Zero(v).Interface()) // @types T | *T | **T | ***T | ****T | chan T | *chan T | **chan T | chan *T | *chan *T | chan **T | chan ***T | chan chan T | chan *chan T | chan chan *T
+}
+
+func main() {
+ reflectIndirect()
+ reflectNewAt()
+ reflectTypeOf()
+ reflectTypeElem()
+ metareflection()
+ typeCycle()
+}
diff --git a/llgo/third_party/go.tools/go/pointer/testdata/rtti.go b/llgo/third_party/go.tools/go/pointer/testdata/rtti.go
new file mode 100644
index 0000000000000000000000000000000000000000..826936de7733b8b7eb6525f4f2d04d16e608d9f8
--- /dev/null
+++ b/llgo/third_party/go.tools/go/pointer/testdata/rtti.go
@@ -0,0 +1,29 @@
+package main
+
+// Regression test for oracle crash
+// https://code.google.com/p/go/issues/detail?id=6605
+//
+// Using reflection, methods may be called on types that are not the
+// operand of any ssa.MakeInterface instruction. In this example,
+// (Y).F is called by deriving the type Y from *Y. Prior to the fix,
+// no RTTI (or method set) for type Y was included in the program, so
+// the F() call would crash.
+
+import "reflect"
+
+var a int
+
+type X struct{}
+
+func (X) F() *int {
+ return &a
+}
+
+type I interface {
+ F() *int
+}
+
+func main() {
+ type Y struct{ X }
+ print(reflect.Indirect(reflect.ValueOf(new(Y))).Interface().(I).F()) // @pointsto main.a
+}
diff --git a/llgo/third_party/go.tools/go/pointer/testdata/structreflect.go b/llgo/third_party/go.tools/go/pointer/testdata/structreflect.go
new file mode 100644
index 0000000000000000000000000000000000000000..9fb49f5590e783ac457fcdf4055ae76e0fc0223c
--- /dev/null
+++ b/llgo/third_party/go.tools/go/pointer/testdata/structreflect.go
@@ -0,0 +1,45 @@
+// +build ignore
+
+package main
+
+import "reflect"
+
+type A struct {
+ f *int
+ g interface{}
+ h bool
+}
+
+var dyn string
+
+func reflectTypeFieldByName() {
+ f, _ := reflect.TypeOf(A{}).FieldByName("f")
+ print(f.Type) // @pointsto *int
+
+ g, _ := reflect.TypeOf(A{}).FieldByName("g")
+ print(g.Type) // @pointsto interface{}
+ print(reflect.Zero(g.Type)) // @pointsto
+ print(reflect.Zero(g.Type)) // @types interface{}
+
+ print(reflect.Zero(g.Type).Interface()) // @pointsto
+ print(reflect.Zero(g.Type).Interface()) // @types
+
+ h, _ := reflect.TypeOf(A{}).FieldByName("h")
+ print(h.Type) // @pointsto bool
+
+ missing, _ := reflect.TypeOf(A{}).FieldByName("missing")
+ print(missing.Type) // @pointsto
+
+ dyn, _ := reflect.TypeOf(A{}).FieldByName(dyn)
+ print(dyn.Type) // @pointsto *int | bool | interface{}
+}
+
+func reflectTypeField() {
+ fld := reflect.TypeOf(A{}).Field(0)
+ print(fld.Type) // @pointsto *int | bool | interface{}
+}
+
+func main() {
+ reflectTypeFieldByName()
+ reflectTypeField()
+}
diff --git a/llgo/third_party/go.tools/go/pointer/testdata/structs.go b/llgo/third_party/go.tools/go/pointer/testdata/structs.go
new file mode 100644
index 0000000000000000000000000000000000000000..9036d608db99910559a401ff45e0c19e5bb87b4e
--- /dev/null
+++ b/llgo/third_party/go.tools/go/pointer/testdata/structs.go
@@ -0,0 +1,100 @@
+// +build ignore
+
+package main
+
+var unknown bool // defeat dead-code elimination
+
+var p, q int
+
+type A struct {
+ f *int
+ g interface{}
+}
+
+func (a A) m1() {
+ print(a.f) // @pointsto main.p
+}
+
+func (a *A) m2() {
+ print(a) // @pointsto complit.A@struct1s:9
+ print(a.f) // @pointsto main.p
+}
+
+type B struct {
+ h *int
+ A
+}
+
+func structs1() {
+ b := &B{ // @line struct1s
+ h: &q,
+ }
+ b.f = &p
+ b.g = b
+
+ print(b.h) // @pointsto main.q
+ print(b.f) // @pointsto main.p
+ print(b.g) // @types *B
+
+ ptr := &b.f
+ print(*ptr) // @pointsto main.p
+
+ b.m1()
+ b.m2()
+}
+
+// @calls main.structs1 -> (main.A).m1
+// @calls main.structs1 -> (*main.A).m2
+// @calls (*main.B).m1 -> (main.A).m1
+// @calls (*main.B).m2 -> (*main.A).m2
+
+type T struct {
+ x int
+ y int
+}
+
+type S struct {
+ a [3]T
+ b *[3]T
+ c [3]*T
+}
+
+func structs2() {
+ var s S // @line s2s
+ print(&s) // @pointsto s@s2s:6
+ print(&s.a) // @pointsto s.a@s2s:6
+ print(&s.a[0]) // @pointsto s.a[*]@s2s:6
+ print(&s.a[0].x) // @pointsto s.a[*].x@s2s:6
+ print(&s.a[0].y) // @pointsto s.a[*].y@s2s:6
+ print(&s.b) // @pointsto s.b@s2s:6
+ print(&s.b[0]) // @pointsto
+ print(&s.b[0].x) // @pointsto
+ print(&s.b[0].y) // @pointsto
+ print(&s.c) // @pointsto s.c@s2s:6
+ print(&s.c[0]) // @pointsto s.c[*]@s2s:6
+ print(&s.c[0].x) // @pointsto
+ print(&s.c[0].y) // @pointsto
+
+ var s2 S // @line s2s2
+ s2.b = new([3]T) // @line s2s2b
+ print(s2.b) // @pointsto new@s2s2b:12
+ print(&s2.b) // @pointsto s2.b@s2s2:6
+ print(&s2.b[0]) // @pointsto new[*]@s2s2b:12
+ print(&s2.b[0].x) // @pointsto new[*].x@s2s2b:12
+ print(&s2.b[0].y) // @pointsto new[*].y@s2s2b:12
+ print(&s2.c[0].x) // @pointsto
+ print(&s2.c[0].y) // @pointsto
+
+ var s3 S // @line s2s3
+ s3.c[2] = new(T) // @line s2s3c
+ print(&s3.c) // @pointsto s3.c@s2s3:6
+ print(s3.c[1]) // @pointsto new@s2s3c:15
+ print(&s3.c[1]) // @pointsto s3.c[*]@s2s3:6
+ print(&s3.c[1].x) // @pointsto new.x@s2s3c:15
+ print(&s3.c[1].y) // @pointsto new.y@s2s3c:15
+}
+
+func main() {
+ structs1()
+ structs2()
+}
diff --git a/llgo/third_party/go.tools/go/pointer/testdata/timer.go b/llgo/third_party/go.tools/go/pointer/testdata/timer.go
new file mode 100644
index 0000000000000000000000000000000000000000..465d0813a18b6cf426519b7716dc3f67b00a09c2
--- /dev/null
+++ b/llgo/third_party/go.tools/go/pointer/testdata/timer.go
@@ -0,0 +1,24 @@
+// +build ignore
+
+package main
+
+import "time"
+
+func after() {}
+
+func main() {
+ // @calls time.startTimer -> time.sendTime
+ ticker := time.NewTicker(1)
+ <-ticker.C
+
+ // @calls time.startTimer -> time.sendTime
+ timer := time.NewTimer(time.Second)
+ <-timer.C
+
+ // @calls time.startTimer -> time.goFunc
+ // @calls time.goFunc -> main.after
+ timer = time.AfterFunc(time.Second, after)
+ <-timer.C
+}
+
+// @calls time.sendTime -> time.Now
diff --git a/llgo/third_party/go.tools/go/pointer/util.go b/llgo/third_party/go.tools/go/pointer/util.go
new file mode 100644
index 0000000000000000000000000000000000000000..56637f2cc7de0a57d4c22356fe05f4201d0dc28e
--- /dev/null
+++ b/llgo/third_party/go.tools/go/pointer/util.go
@@ -0,0 +1,318 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pointer
+
+import (
+ "bytes"
+ "fmt"
+ "log"
+ "os"
+ "os/exec"
+ "runtime"
+ "time"
+
+ "llvm.org/llgo/third_party/go.tools/container/intsets"
+ "llvm.org/llgo/third_party/go.tools/go/types"
+)
+
+// CanPoint reports whether the type T is pointerlike,
+// for the purposes of this analysis.
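+//
+// For example, *int, chan int, map[string]bool, func(), []byte,
+// interface{} and reflect.Value are pointerlike, whereas int, [2]*int
+// and struct{ f *int } are not; pointerlike subelements of arrays and
+// structs are modeled separately via flatten.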
+func CanPoint(T types.Type) bool {
+ switch T := T.(type) {
+ case *types.Named:
+ if obj := T.Obj(); obj.Name() == "Value" && obj.Pkg().Path() == "reflect" {
+ return true // treat reflect.Value like interface{}
+ }
+ return CanPoint(T.Underlying())
+
+ case *types.Pointer, *types.Interface, *types.Map, *types.Chan, *types.Signature, *types.Slice:
+ return true
+ }
+
+ return false // array struct tuple builtin basic
+}
+
+// CanHaveDynamicTypes reports whether the type T can "hold" dynamic types,
+// i.e. is an interface (incl. reflect.Type) or a reflect.Value.
+//
+func CanHaveDynamicTypes(T types.Type) bool {
+ switch T := T.(type) {
+ case *types.Named:
+ if obj := T.Obj(); obj.Name() == "Value" && obj.Pkg().Path() == "reflect" {
+ return true // reflect.Value
+ }
+ return CanHaveDynamicTypes(T.Underlying())
+ case *types.Interface:
+ return true
+ }
+ return false
+}
+
+// isInterface reports whether T is an interface type.
+func isInterface(T types.Type) bool {
+ _, ok := T.Underlying().(*types.Interface)
+ return ok
+}
+
+// mustDeref returns the element type of its argument, which must be a
+// pointer; panic ensues otherwise.
+func mustDeref(typ types.Type) types.Type {
+ return typ.Underlying().(*types.Pointer).Elem()
+}
+
+// deref returns a pointer's element type; otherwise it returns typ.
+func deref(typ types.Type) types.Type {
+ if p, ok := typ.Underlying().(*types.Pointer); ok {
+ return p.Elem()
+ }
+ return typ
+}
+
+// A fieldInfo describes one subelement (node) of the flattening-out
+// of a type T: the subelement's type and its path from the root of T.
+//
+// For example, for this type:
+// type line struct{ points []struct{x, y int} }
+// flatten() of the inner struct yields the following []fieldInfo:
+// struct{ x, y int } ""
+// int ".x"
+// int ".y"
+// and flatten(line) yields:
+// struct{ points []struct{x, y int} } ""
+// struct{ x, y int } ".points[*]"
+// int ".points[*].x
+// int ".points[*].y"
+//
+type fieldInfo struct {
+ typ types.Type
+
+ // op and tail describe the path to the element (e.g. ".a#2.b[*].c").
+ op interface{} // *Array: true; *Tuple: int; *Struct: *types.Var; *Named: nil
+ tail *fieldInfo
+}
+
+// path returns a user-friendly string describing the subelement path.
+//
+func (fi *fieldInfo) path() string {
+ var buf bytes.Buffer
+ for p := fi; p != nil; p = p.tail {
+ switch op := p.op.(type) {
+ case bool:
+ fmt.Fprintf(&buf, "[*]")
+ case int:
+ fmt.Fprintf(&buf, "#%d", op)
+ case *types.Var:
+ fmt.Fprintf(&buf, ".%s", op.Name())
+ }
+ }
+ return buf.String()
+}
+
+// flatten returns a list of directly contained fields in the preorder
+// traversal of the type tree of t. The resulting elements are all
+// scalars (basic types or pointerlike types), except for struct/array
+// "identity" nodes, whose type is that of the aggregate.
+//
+// reflect.Value is considered pointerlike, similar to interface{}.
+//
+// Callers must not mutate the result.
+//
+func (a *analysis) flatten(t types.Type) []*fieldInfo {
+ fl, ok := a.flattenMemo[t]
+ if !ok {
+ switch t := t.(type) {
+ case *types.Named:
+ u := t.Underlying()
+ if isInterface(u) {
+ // Debuggability hack: don't remove
+ // the named type from interfaces as
+ // they're very verbose.
+ fl = append(fl, &fieldInfo{typ: t})
+ } else {
+ fl = a.flatten(u)
+ }
+
+ case *types.Basic,
+ *types.Signature,
+ *types.Chan,
+ *types.Map,
+ *types.Interface,
+ *types.Slice,
+ *types.Pointer:
+ fl = append(fl, &fieldInfo{typ: t})
+
+ case *types.Array:
+ fl = append(fl, &fieldInfo{typ: t}) // identity node
+ for _, fi := range a.flatten(t.Elem()) {
+ fl = append(fl, &fieldInfo{typ: fi.typ, op: true, tail: fi})
+ }
+
+ case *types.Struct:
+ fl = append(fl, &fieldInfo{typ: t}) // identity node
+ for i, n := 0, t.NumFields(); i < n; i++ {
+ f := t.Field(i)
+ for _, fi := range a.flatten(f.Type()) {
+ fl = append(fl, &fieldInfo{typ: fi.typ, op: f, tail: fi})
+ }
+ }
+
+ case *types.Tuple:
+ // No identity node: tuples are never address-taken.
+ n := t.Len()
+ if n == 1 {
+ // Don't add a fieldInfo link for singletons,
+ // e.g. in params/results.
+ fl = append(fl, a.flatten(t.At(0).Type())...)
+ } else {
+ for i := 0; i < n; i++ {
+ f := t.At(i)
+ for _, fi := range a.flatten(f.Type()) {
+ fl = append(fl, &fieldInfo{typ: fi.typ, op: i, tail: fi})
+ }
+ }
+ }
+
+ default:
+ panic(t)
+ }
+
+ a.flattenMemo[t] = fl
+ }
+
+ return fl
+}
+
+// sizeof returns the number of pointerlike abstractions (nodes) in the type t.
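+//
+// For example, sizeof(int) = 1, sizeof(*int) = 1,
+// sizeof(struct{ x, y int }) = 3 (one identity node plus one node per
+// field), and sizeof([10]*int) = 2 (one identity node plus a single
+// representative element; arrays are not expanded per index).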
+func (a *analysis) sizeof(t types.Type) uint32 {
+ return uint32(len(a.flatten(t)))
+}
+
+// shouldTrack reports whether object type T contains (recursively)
+// any fields whose addresses should be tracked.
+func (a *analysis) shouldTrack(T types.Type) bool {
+ if a.track == trackAll {
+ return true // fast path
+ }
+ track, ok := a.trackTypes[T]
+ if !ok {
+ a.trackTypes[T] = true // break cycles conservatively
+ // NB: reflect.Value, reflect.Type are pre-populated to true.
+ for _, fi := range a.flatten(T) {
+ switch ft := fi.typ.Underlying().(type) {
+ case *types.Interface, *types.Signature:
+ track = true // needed for callgraph
+ case *types.Basic:
+ // no-op
+ case *types.Chan:
+ track = a.track&trackChan != 0 || a.shouldTrack(ft.Elem())
+ case *types.Map:
+ track = a.track&trackMap != 0 || a.shouldTrack(ft.Key()) || a.shouldTrack(ft.Elem())
+ case *types.Slice:
+ track = a.track&trackSlice != 0 || a.shouldTrack(ft.Elem())
+ case *types.Pointer:
+ track = a.track&trackPtr != 0 || a.shouldTrack(ft.Elem())
+ case *types.Array, *types.Struct:
+ // No need to look at field types since they will follow (flattened).
+ default:
+ // Includes *types.Tuple, which are never address-taken.
+ panic(ft)
+ }
+ if track {
+ break
+ }
+ }
+ a.trackTypes[T] = track
+ if !track && a.log != nil {
+ fmt.Fprintf(a.log, "\ttype not tracked: %s\n", T)
+ }
+ }
+ return track
+}
+
+// offsetOf returns the (abstract) offset of field index within struct
+// or tuple typ.
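+//
+// For example, given struct{ a int; b *int; c bool }, offsetOf(typ, 2)
+// is 3: one node for the struct itself plus sizeof(int) and
+// sizeof(*int) for the fields that precede c.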
+func (a *analysis) offsetOf(typ types.Type, index int) uint32 {
+ var offset uint32
+ switch t := typ.Underlying().(type) {
+ case *types.Tuple:
+ for i := 0; i < index; i++ {
+ offset += a.sizeof(t.At(i).Type())
+ }
+ case *types.Struct:
+ offset++ // the node for the struct itself
+ for i := 0; i < index; i++ {
+ offset += a.sizeof(t.Field(i).Type())
+ }
+ default:
+ panic(fmt.Sprintf("offsetOf(%s : %T)", typ, typ))
+ }
+ return offset
+}
+
+// sliceToArray returns the type representing the arrays to which
+// slice type slice points.
+func sliceToArray(slice types.Type) *types.Array {
+ return types.NewArray(slice.Underlying().(*types.Slice).Elem(), 1)
+}
+
+// Node set -------------------------------------------------------------------
+
+type nodeset struct {
+ intsets.Sparse
+}
+
+func (ns *nodeset) String() string {
+ var buf bytes.Buffer
+ buf.WriteRune('{')
+ var space [50]int
+ for i, n := range ns.AppendTo(space[:0]) {
+ if i > 0 {
+ buf.WriteString(", ")
+ }
+ buf.WriteRune('n')
+ fmt.Fprintf(&buf, "%d", n)
+ }
+ buf.WriteRune('}')
+ return buf.String()
+}
+
+func (ns *nodeset) add(n nodeid) bool {
+ return ns.Sparse.Insert(int(n))
+}
+
+func (x *nodeset) addAll(y *nodeset) bool {
+ return x.UnionWith(&y.Sparse)
+}
+
+// Profiling & debugging -------------------------------------------------------
+
+var timers = make(map[string]time.Time)
+
+func start(name string) {
+ if debugTimers {
+ timers[name] = time.Now()
+ log.Printf("%s...\n", name)
+ }
+}
+
+func stop(name string) {
+ if debugTimers {
+ log.Printf("%s took %s\n", name, time.Since(timers[name]))
+ }
+}
+
+// diff runs the command "diff a b" and reports its success.
+func diff(a, b string) bool {
+ var cmd *exec.Cmd
+ switch runtime.GOOS {
+ case "plan9":
+ cmd = exec.Command("/bin/diff", "-c", a, b)
+ default:
+ cmd = exec.Command("/usr/bin/diff", "-u", a, b)
+ }
+ cmd.Stdout = os.Stderr
+ cmd.Stderr = os.Stderr
+ return cmd.Run() == nil
+}
diff --git a/llgo/third_party/go.tools/go/ssa/blockopt.go b/llgo/third_party/go.tools/go/ssa/blockopt.go
new file mode 100644
index 0000000000000000000000000000000000000000..e79260a21a2e1cb118554752c54cc88ceb681ee4
--- /dev/null
+++ b/llgo/third_party/go.tools/go/ssa/blockopt.go
@@ -0,0 +1,187 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// Simple block optimizations to simplify the control flow graph.
+
+// TODO(adonovan): opt: instead of creating several "unreachable" blocks
+// per function in the Builder, reuse a single one (e.g. at Blocks[1])
+// to reduce garbage.
+
+import (
+ "fmt"
+ "os"
+)
+
+// If true, perform sanity checking and show progress at each
+// successive iteration of optimizeBlocks. Very verbose.
+const debugBlockOpt = false
+
+// markReachable sets Index=-1 for all blocks reachable from b.
+func markReachable(b *BasicBlock) {
+ b.Index = -1
+ for _, succ := range b.Succs {
+ if succ.Index == 0 {
+ markReachable(succ)
+ }
+ }
+}
+
+// deleteUnreachableBlocks marks all reachable blocks of f and
+// eliminates (nils) all others, including possibly cyclic subgraphs.
+//
+func deleteUnreachableBlocks(f *Function) {
+ const white, black = 0, -1
+ // We borrow b.Index temporarily as the mark bit.
+ for _, b := range f.Blocks {
+ b.Index = white
+ }
+ markReachable(f.Blocks[0])
+ if f.Recover != nil {
+ markReachable(f.Recover)
+ }
+ for i, b := range f.Blocks {
+ if b.Index == white {
+ for _, c := range b.Succs {
+ if c.Index == black {
+ c.removePred(b) // delete white->black edge
+ }
+ }
+ if debugBlockOpt {
+ fmt.Fprintln(os.Stderr, "unreachable", b)
+ }
+ f.Blocks[i] = nil // delete b
+ }
+ }
+ f.removeNilBlocks()
+}
+
+// jumpThreading attempts to apply simple jump-threading to block b,
+// in which a->b->c becomes a->c if b is just a Jump.
+// The result is true if the optimization was applied.
+//
+func jumpThreading(f *Function, b *BasicBlock) bool {
+ if b.Index == 0 {
+ return false // don't apply to entry block
+ }
+ if b.Instrs == nil {
+ return false
+ }
+ if _, ok := b.Instrs[0].(*Jump); !ok {
+ return false // not just a jump
+ }
+ c := b.Succs[0]
+ if c == b {
+ return false // don't apply to degenerate jump-to-self.
+ }
+ if c.hasPhi() {
+ return false // not sound without more effort
+ }
+ for j, a := range b.Preds {
+ a.replaceSucc(b, c)
+
+ // If a now has two edges to c, replace its degenerate If by Jump.
+ if len(a.Succs) == 2 && a.Succs[0] == c && a.Succs[1] == c {
+ jump := new(Jump)
+ jump.setBlock(a)
+ a.Instrs[len(a.Instrs)-1] = jump
+ a.Succs = a.Succs[:1]
+ c.removePred(b)
+ } else {
+ if j == 0 {
+ c.replacePred(b, a)
+ } else {
+ c.Preds = append(c.Preds, a)
+ }
+ }
+
+ if debugBlockOpt {
+ fmt.Fprintln(os.Stderr, "jumpThreading", a, b, c)
+ }
+ }
+ f.Blocks[b.Index] = nil // delete b
+ return true
+}
+
+// fuseBlocks attempts to apply the block fusion optimization to block
+// a, in which a->b becomes ab if len(a.Succs)==len(b.Preds)==1.
+// The result is true if the optimization was applied.
+//
+func fuseBlocks(f *Function, a *BasicBlock) bool {
+ if len(a.Succs) != 1 {
+ return false
+ }
+ b := a.Succs[0]
+ if len(b.Preds) != 1 {
+ return false
+ }
+
+ // Degenerate &&/|| ops may result in a straight-line CFG
+ // containing φ-nodes. (Ideally we'd replace them with
+ // their sole operand but that requires Referrers, built later.)
+ if b.hasPhi() {
+ return false // not sound without further effort
+ }
+
+ // Eliminate jump at end of A, then copy all of B across.
+ a.Instrs = append(a.Instrs[:len(a.Instrs)-1], b.Instrs...)
+ for _, instr := range b.Instrs {
+ instr.setBlock(a)
+ }
+
+ // A inherits B's successors
+ a.Succs = append(a.succs2[:0], b.Succs...)
+
+ // Fix up Preds links of all successors of B.
+ for _, c := range b.Succs {
+ c.replacePred(b, a)
+ }
+
+ if debugBlockOpt {
+ fmt.Fprintln(os.Stderr, "fuseBlocks", a, b)
+ }
+
+ f.Blocks[b.Index] = nil // delete b
+ return true
+}
+
+// optimizeBlocks() performs some simple block optimizations on a
+// completed function: dead block elimination, block fusion, jump
+// threading.
+//
+func optimizeBlocks(f *Function) {
+ deleteUnreachableBlocks(f)
+
+ // Loop until no further progress.
+ changed := true
+ for changed {
+ changed = false
+
+ if debugBlockOpt {
+ f.WriteTo(os.Stderr)
+ mustSanityCheck(f, nil)
+ }
+
+ for _, b := range f.Blocks {
+ // f.Blocks will temporarily contain nils to indicate
+ // deleted blocks; we remove them at the end.
+ if b == nil {
+ continue
+ }
+
+ // Fuse blocks. b->c becomes bc.
+ if fuseBlocks(f, b) {
+ changed = true
+ }
+
+ // a->b->c becomes a->c if b contains only a Jump.
+ if jumpThreading(f, b) {
+ changed = true
+ continue // (b was disconnected)
+ }
+ }
+ }
+ f.removeNilBlocks()
+}
diff --git a/llgo/third_party/go.tools/go/ssa/builder.go b/llgo/third_party/go.tools/go/ssa/builder.go
new file mode 100644
index 0000000000000000000000000000000000000000..4ca5e0e5f40907cb63adffa4281b34c6cb64f4f7
--- /dev/null
+++ b/llgo/third_party/go.tools/go/ssa/builder.go
@@ -0,0 +1,2410 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// This file implements the BUILD phase of SSA construction.
+//
+// SSA construction has two phases, CREATE and BUILD. In the CREATE phase
+// (create.go), all packages are constructed and type-checked and
+// definitions of all package members are created, method-sets are
+// computed, and wrapper methods are synthesized.
+// ssa.Packages are created in arbitrary order.
+//
+// In the BUILD phase (builder.go), the builder traverses the AST of
+// each Go source function and generates SSA instructions for the
+// function body. Initializer expressions for package-level variables
+// are emitted to the package's init() function in the order specified
+// by go/types.Info.InitOrder, then code for each function in the
+// package is generated in lexical order.
+// The BUILD phases for distinct packages are independent and are
+// executed in parallel.
+//
+// TODO(adonovan): indeed, building functions is now embarrassingly parallel.
+// Audit for concurrency then benchmark using more goroutines.
+//
+// The builder's and Program's indices (maps) are populated and
+// mutated during the CREATE phase, but during the BUILD phase they
+// remain constant. The sole exception is Prog.methodSets and its
+// related maps, which are protected by a dedicated mutex.
+
+import (
+ "fmt"
+ "go/ast"
+ "go/token"
+ "os"
+ "sync"
+ "sync/atomic"
+
+ "llvm.org/llgo/third_party/go.tools/go/exact"
+ "llvm.org/llgo/third_party/go.tools/go/types"
+)
+
+type opaqueType struct {
+ types.Type
+ name string
+}
+
+func (t *opaqueType) String() string { return t.name }
+
+var (
+ varOk = newVar("ok", tBool)
+ varIndex = newVar("index", tInt)
+
+ // Type constants.
+ tBool = types.Typ[types.Bool]
+ tByte = types.Typ[types.Byte]
+ tInt = types.Typ[types.Int]
+ tInvalid = types.Typ[types.Invalid]
+ tString = types.Typ[types.String]
+ tUntypedNil = types.Typ[types.UntypedNil]
+ tRangeIter = &opaqueType{nil, "iter"} // the type of all "range" iterators
+ tEface = new(types.Interface)
+
+ // SSA Value constants.
+ vZero = intConst(0)
+ vOne = intConst(1)
+ vTrue = NewConst(exact.MakeBool(true), tBool)
+)
+
+// builder holds state associated with the package currently being built.
+// Its methods contain all the logic for AST-to-SSA conversion.
+type builder struct{}
+
+// cond emits to fn code to evaluate boolean condition e and jump
+// to t or f depending on its value, performing various simplifications.
+//
+// Postcondition: fn.currentBlock is nil.
+//
+func (b *builder) cond(fn *Function, e ast.Expr, t, f *BasicBlock) {
+ switch e := e.(type) {
+ case *ast.ParenExpr:
+ b.cond(fn, e.X, t, f)
+ return
+
+ case *ast.BinaryExpr:
+ switch e.Op {
+ case token.LAND:
+ ltrue := fn.newBasicBlock("cond.true")
+ b.cond(fn, e.X, ltrue, f)
+ fn.currentBlock = ltrue
+ b.cond(fn, e.Y, t, f)
+ return
+
+ case token.LOR:
+ lfalse := fn.newBasicBlock("cond.false")
+ b.cond(fn, e.X, t, lfalse)
+ fn.currentBlock = lfalse
+ b.cond(fn, e.Y, t, f)
+ return
+ }
+
+ case *ast.UnaryExpr:
+ if e.Op == token.NOT {
+ b.cond(fn, e.X, f, t)
+ return
+ }
+ }
+
+ // A traditional compiler would simplify "if false" (etc) here
+ // but we do not, for better fidelity to the source code.
+ //
+ // The value of a constant condition may be platform-specific,
+ // and may cause blocks that are reachable in some configuration
+ // to be hidden from subsequent analyses such as bug-finding tools.
+ emitIf(fn, b.expr(fn, e), t, f)
+}
+
+// logicalBinop emits code to fn to evaluate e, a &&- or
+// ||-expression whose reified boolean value is wanted.
+// The value is returned.
+//
+func (b *builder) logicalBinop(fn *Function, e *ast.BinaryExpr) Value {
+ rhs := fn.newBasicBlock("binop.rhs")
+ done := fn.newBasicBlock("binop.done")
+
+ // T(e) = T(e.X) = T(e.Y) after untyped constants have been
+ // eliminated.
+ // TODO(adonovan): not true; MyBool==MyBool yields UntypedBool.
+ t := fn.Pkg.typeOf(e)
+
+ var short Value // value of the short-circuit path
+ switch e.Op {
+ case token.LAND:
+ b.cond(fn, e.X, rhs, done)
+ short = NewConst(exact.MakeBool(false), t)
+
+ case token.LOR:
+ b.cond(fn, e.X, done, rhs)
+ short = NewConst(exact.MakeBool(true), t)
+ }
+
+ // Is rhs unreachable?
+ if rhs.Preds == nil {
+ // Simplify false&&y to false, true||y to true.
+ fn.currentBlock = done
+ return short
+ }
+
+ // Is done unreachable?
+ if done.Preds == nil {
+ // Simplify true&&y (or false||y) to y.
+ fn.currentBlock = rhs
+ return b.expr(fn, e.Y)
+ }
+
+ // All edges from e.X to done carry the short-circuit value.
+ var edges []Value
+ for _ = range done.Preds {
+ edges = append(edges, short)
+ }
+
+ // The edge from e.Y to done carries the value of e.Y.
+ fn.currentBlock = rhs
+ edges = append(edges, b.expr(fn, e.Y))
+ emitJump(fn, done)
+ fn.currentBlock = done
+
+ phi := &Phi{Edges: edges, Comment: e.Op.String()}
+ phi.pos = e.OpPos
+ phi.typ = t
+ return done.emit(phi)
+}
+
+// exprN lowers a multi-result expression e to SSA form, emitting code
+// to fn and returning a single Value whose type is a *types.Tuple.
+// The caller must access the components via Extract.
+//
+// Multi-result expressions include CallExprs in a multi-value
+// assignment or return statement, and "value,ok" uses of
+// TypeAssertExpr, IndexExpr (when X is a map), and UnaryExpr (when Op
+// is token.ARROW).
+//
+func (b *builder) exprN(fn *Function, e ast.Expr) Value {
+ typ := fn.Pkg.typeOf(e).(*types.Tuple)
+ switch e := e.(type) {
+ case *ast.ParenExpr:
+ return b.exprN(fn, e.X)
+
+ case *ast.CallExpr:
+ // Currently, no built-in function nor type conversion
+ // has multiple results, so we can avoid some of the
+ // cases for single-valued CallExpr.
+ var c Call
+ b.setCall(fn, e, &c.Call)
+ c.typ = typ
+ return fn.emit(&c)
+
+ case *ast.IndexExpr:
+ mapt := fn.Pkg.typeOf(e.X).Underlying().(*types.Map)
+ lookup := &Lookup{
+ X: b.expr(fn, e.X),
+ Index: emitConv(fn, b.expr(fn, e.Index), mapt.Key()),
+ CommaOk: true,
+ }
+ lookup.setType(typ)
+ lookup.setPos(e.Lbrack)
+ return fn.emit(lookup)
+
+ case *ast.TypeAssertExpr:
+ return emitTypeTest(fn, b.expr(fn, e.X), typ.At(0).Type(), e.Lparen)
+
+ case *ast.UnaryExpr: // must be receive <-
+ unop := &UnOp{
+ Op: token.ARROW,
+ X: b.expr(fn, e.X),
+ CommaOk: true,
+ }
+ unop.setType(typ)
+ unop.setPos(e.OpPos)
+ return fn.emit(unop)
+ }
+ panic(fmt.Sprintf("exprN(%T) in %s", e, fn))
+}
+
+// builtin emits to fn SSA instructions to implement a call to the
+// built-in function obj with the specified arguments
+// and return type. It returns the value defined by the result.
+//
+// The result is nil if no special handling was required; in this case
+// the caller should treat this like an ordinary library function
+// call.
+//
+func (b *builder) builtin(fn *Function, obj *types.Builtin, args []ast.Expr, typ types.Type, pos token.Pos) Value {
+ switch obj.Name() {
+ case "make":
+ switch typ.Underlying().(type) {
+ case *types.Slice:
+ n := b.expr(fn, args[1])
+ m := n
+ if len(args) == 3 {
+ m = b.expr(fn, args[2])
+ }
+ v := &MakeSlice{
+ Len: n,
+ Cap: m,
+ }
+ v.setPos(pos)
+ v.setType(typ)
+ return fn.emit(v)
+
+ case *types.Map:
+ var res Value
+ if len(args) == 2 {
+ res = b.expr(fn, args[1])
+ }
+ v := &MakeMap{Reserve: res}
+ v.setPos(pos)
+ v.setType(typ)
+ return fn.emit(v)
+
+ case *types.Chan:
+ var sz Value = vZero
+ if len(args) == 2 {
+ sz = b.expr(fn, args[1])
+ }
+ v := &MakeChan{Size: sz}
+ v.setPos(pos)
+ v.setType(typ)
+ return fn.emit(v)
+ }
+
+ case "new":
+ alloc := emitNew(fn, deref(typ), pos)
+ alloc.Comment = "new"
+ return alloc
+
+ case "len", "cap":
+ // Special case: len or cap of an array or *array is
+ // based on the type, not the value which may be nil.
+ // We must still evaluate the value, though. (If it
+ // was side-effect free, the whole call would have
+ // been constant-folded.)
+ t := deref(fn.Pkg.typeOf(args[0])).Underlying()
+ if at, ok := t.(*types.Array); ok {
+ b.expr(fn, args[0]) // for effects only
+ return intConst(at.Len())
+ }
+ // Otherwise treat as normal.
+
+ case "panic":
+ fn.emit(&Panic{
+ X: emitConv(fn, b.expr(fn, args[0]), tEface),
+ pos: pos,
+ })
+ fn.currentBlock = fn.newBasicBlock("unreachable")
+ return vTrue // any non-nil Value will do
+ }
+ return nil // treat all others as a regular function call
+}
+
+// addr lowers a single-result addressable expression e to SSA form,
+// emitting code to fn and returning the location (an lvalue) defined
+// by the expression.
+//
+// If escaping is true, addr marks the base variable of the
+// addressable expression e as being a potentially escaping pointer
+// value. For example, in this code:
+//
+// a := A{
+// b: [1]B{B{c: 1}}
+// }
+// return &a.b[0].c
+//
+// the application of & causes a.b[0].c to have its address taken,
+// which means that ultimately the local variable a must be
+// heap-allocated. This is a simple but very conservative escape
+// analysis.
+//
+// Operations forming potentially escaping pointers include:
+// - &x, including when implicit in method call or composite literals.
+// - a[:] iff a is an array (not *array)
+// - references to variables in lexically enclosing functions.
+//
+func (b *builder) addr(fn *Function, e ast.Expr, escaping bool) lvalue {
+ switch e := e.(type) {
+ case *ast.Ident:
+ if isBlankIdent(e) {
+ return blank{}
+ }
+ obj := fn.Pkg.objectOf(e)
+ v := fn.Prog.packageLevelValue(obj) // var (address)
+ if v == nil {
+ v = fn.lookup(obj, escaping)
+ }
+ return &address{addr: v, expr: e}
+
+ case *ast.CompositeLit:
+ t := deref(fn.Pkg.typeOf(e))
+ var v *Alloc
+ if escaping {
+ v = emitNew(fn, t, e.Lbrace)
+ } else {
+ v = fn.addLocal(t, e.Lbrace)
+ }
+ v.Comment = "complit"
+ b.compLit(fn, v, e, true) // initialize in place
+ return &address{addr: v, expr: e}
+
+ case *ast.ParenExpr:
+ return b.addr(fn, e.X, escaping)
+
+ case *ast.SelectorExpr:
+ sel, ok := fn.Pkg.info.Selections[e]
+ if !ok {
+ // qualified identifier
+ return b.addr(fn, e.Sel, escaping)
+ }
+ if sel.Kind() != types.FieldVal {
+ panic(sel)
+ }
+ wantAddr := true
+ v := b.receiver(fn, e.X, wantAddr, escaping, sel)
+ last := len(sel.Index()) - 1
+ return &address{
+ addr: emitFieldSelection(fn, v, sel.Index()[last], true, e.Sel),
+ expr: e.Sel,
+ }
+
+ case *ast.IndexExpr:
+ var x Value
+ var et types.Type
+ switch t := fn.Pkg.typeOf(e.X).Underlying().(type) {
+ case *types.Array:
+ x = b.addr(fn, e.X, escaping).address(fn)
+ et = types.NewPointer(t.Elem())
+ case *types.Pointer: // *array
+ x = b.expr(fn, e.X)
+ et = types.NewPointer(t.Elem().Underlying().(*types.Array).Elem())
+ case *types.Slice:
+ x = b.expr(fn, e.X)
+ et = types.NewPointer(t.Elem())
+ case *types.Map:
+ return &element{
+ m: b.expr(fn, e.X),
+ k: emitConv(fn, b.expr(fn, e.Index), t.Key()),
+ t: t.Elem(),
+ pos: e.Lbrack,
+ }
+ default:
+ panic("unexpected container type in IndexExpr: " + t.String())
+ }
+ v := &IndexAddr{
+ X: x,
+ Index: emitConv(fn, b.expr(fn, e.Index), tInt),
+ }
+ v.setPos(e.Lbrack)
+ v.setType(et)
+ return &address{addr: fn.emit(v), expr: e}
+
+ case *ast.StarExpr:
+ return &address{addr: b.expr(fn, e.X), starPos: e.Star, expr: e}
+ }
+
+ panic(fmt.Sprintf("unexpected address expression: %T", e))
+}
+
+// exprInPlace emits to fn code to initialize the lvalue loc with the
+// value of expression e. If isZero is true, exprInPlace assumes that loc
+// holds the zero value for its type.
+//
+// This is equivalent to loc.store(fn, b.expr(fn, e)) but may
+// generate better code in some cases, e.g. for composite literals
+// in an addressable location.
+//
+func (b *builder) exprInPlace(fn *Function, loc lvalue, e ast.Expr, isZero bool) {
+ if e, ok := unparen(e).(*ast.CompositeLit); ok {
+ // A CompositeLit never evaluates to a pointer,
+ // so if the type of the location is a pointer,
+ // an &-operation is implied.
+ if _, ok := loc.(blank); !ok { // avoid calling blank.typ()
+ if isPointer(loc.typ()) {
+ ptr := b.addr(fn, e, true).address(fn)
+ loc.store(fn, ptr) // copy address
+ return
+ }
+ }
+
+ if _, ok := loc.(*address); ok {
+ if isInterface(loc.typ()) {
+ // e.g. var x interface{} = T{...}
+ // Can't in-place initialize an interface value.
+ // Fall back to copying.
+ } else {
+ addr := loc.address(fn)
+ b.compLit(fn, addr, e, isZero) // in place
+ emitDebugRef(fn, e, addr, true)
+ return
+ }
+ }
+ }
+ loc.store(fn, b.expr(fn, e)) // copy value
+}
+
+// expr lowers a single-result expression e to SSA form, emitting code
+// to fn and returning the Value defined by the expression.
+//
+func (b *builder) expr(fn *Function, e ast.Expr) Value {
+ e = unparen(e)
+
+ tv := fn.Pkg.info.Types[e]
+
+ // Is expression a constant?
+ if tv.Value != nil {
+ return NewConst(tv.Value, tv.Type)
+ }
+
+ var v Value
+ if tv.Addressable() {
+ // Prefer pointer arithmetic ({Index,Field}Addr) followed
+ // by Load over subelement extraction (e.g. Index, Field),
+ // to avoid large copies.
+ v = b.addr(fn, e, false).load(fn)
+ } else {
+ v = b.expr0(fn, e, tv)
+ }
+ if fn.debugInfo() {
+ emitDebugRef(fn, e, v, false)
+ }
+ return v
+}
+
+func (b *builder) expr0(fn *Function, e ast.Expr, tv types.TypeAndValue) Value {
+ switch e := e.(type) {
+ case *ast.BasicLit:
+ panic("non-constant BasicLit") // unreachable
+
+ case *ast.FuncLit:
+ fn2 := &Function{
+ name: fmt.Sprintf("%s$%d", fn.Name(), 1+len(fn.AnonFuncs)),
+ Signature: fn.Pkg.typeOf(e.Type).Underlying().(*types.Signature),
+ pos: e.Type.Func,
+ parent: fn,
+ Pkg: fn.Pkg,
+ Prog: fn.Prog,
+ syntax: e,
+ }
+ fn.AnonFuncs = append(fn.AnonFuncs, fn2)
+ b.buildFunction(fn2)
+ if fn2.FreeVars == nil {
+ return fn2
+ }
+ v := &MakeClosure{Fn: fn2}
+ v.setType(tv.Type)
+ for _, fv := range fn2.FreeVars {
+ v.Bindings = append(v.Bindings, fv.outer)
+ fv.outer = nil
+ }
+ return fn.emit(v)
+
+ case *ast.TypeAssertExpr: // single-result form only
+ return emitTypeAssert(fn, b.expr(fn, e.X), tv.Type, e.Lparen)
+
+ case *ast.CallExpr:
+ if fn.Pkg.info.Types[e.Fun].IsType() {
+ // Explicit type conversion, e.g. string(x) or big.Int(x)
+ x := b.expr(fn, e.Args[0])
+ y := emitConv(fn, x, tv.Type)
+ if y != x {
+ switch y := y.(type) {
+ case *Convert:
+ y.pos = e.Lparen
+ case *ChangeType:
+ y.pos = e.Lparen
+ case *MakeInterface:
+ y.pos = e.Lparen
+ }
+ }
+ return y
+ }
+ // Call to "intrinsic" built-ins, e.g. new, make, panic.
+ if id, ok := unparen(e.Fun).(*ast.Ident); ok {
+ if obj, ok := fn.Pkg.info.Uses[id].(*types.Builtin); ok {
+ if v := b.builtin(fn, obj, e.Args, tv.Type, e.Lparen); v != nil {
+ return v
+ }
+ }
+ }
+ // Regular function call.
+ var v Call
+ b.setCall(fn, e, &v.Call)
+ v.setType(tv.Type)
+ return fn.emit(&v)
+
+ case *ast.UnaryExpr:
+ switch e.Op {
+ case token.AND: // &X --- potentially escaping.
+ addr := b.addr(fn, e.X, true)
+ if _, ok := unparen(e.X).(*ast.StarExpr); ok {
+ // &*p must panic if p is nil (http://golang.org/s/go12nil).
+ // For simplicity, we'll just (suboptimally) rely
+ // on the side effects of a load.
+ // TODO(adonovan): emit dedicated nilcheck.
+ addr.load(fn)
+ }
+ return addr.address(fn)
+ case token.ADD:
+ return b.expr(fn, e.X)
+ case token.NOT, token.ARROW, token.SUB, token.XOR: // ! <- - ^
+ v := &UnOp{
+ Op: e.Op,
+ X: b.expr(fn, e.X),
+ }
+ v.setPos(e.OpPos)
+ v.setType(tv.Type)
+ return fn.emit(v)
+ default:
+ panic(e.Op)
+ }
+
+ case *ast.BinaryExpr:
+ switch e.Op {
+ case token.LAND, token.LOR:
+ return b.logicalBinop(fn, e)
+ case token.SHL, token.SHR:
+ fallthrough
+ case token.ADD, token.SUB, token.MUL, token.QUO, token.REM, token.AND, token.OR, token.XOR, token.AND_NOT:
+ return emitArith(fn, e.Op, b.expr(fn, e.X), b.expr(fn, e.Y), tv.Type, e.OpPos)
+
+ case token.EQL, token.NEQ, token.GTR, token.LSS, token.LEQ, token.GEQ:
+ cmp := emitCompare(fn, e.Op, b.expr(fn, e.X), b.expr(fn, e.Y), e.OpPos)
+ // The type of x==y may be UntypedBool.
+ return emitConv(fn, cmp, DefaultType(tv.Type))
+ default:
+ panic("illegal op in BinaryExpr: " + e.Op.String())
+ }
+
+ case *ast.SliceExpr:
+ var low, high, max Value
+ var x Value
+ switch fn.Pkg.typeOf(e.X).Underlying().(type) {
+ case *types.Array:
+ // Potentially escaping.
+ x = b.addr(fn, e.X, true).address(fn)
+ case *types.Basic, *types.Slice, *types.Pointer: // *array
+ x = b.expr(fn, e.X)
+ default:
+ unreachable()
+ }
+ if e.High != nil {
+ high = b.expr(fn, e.High)
+ }
+ if e.Low != nil {
+ low = b.expr(fn, e.Low)
+ }
+ if e.Slice3 {
+ max = b.expr(fn, e.Max)
+ }
+ v := &Slice{
+ X: x,
+ Low: low,
+ High: high,
+ Max: max,
+ }
+ v.setPos(e.Lbrack)
+ v.setType(tv.Type)
+ return fn.emit(v)
+
+ case *ast.Ident:
+ obj := fn.Pkg.info.Uses[e]
+ // Universal built-in or nil?
+ switch obj := obj.(type) {
+ case *types.Builtin:
+ return &Builtin{name: obj.Name(), sig: tv.Type.(*types.Signature)}
+ case *types.Nil:
+ return nilConst(tv.Type)
+ }
+ // Package-level func or var?
+ if v := fn.Prog.packageLevelValue(obj); v != nil {
+ if _, ok := obj.(*types.Var); ok {
+ return emitLoad(fn, v) // var (address)
+ }
+ return v // (func)
+ }
+ // Local var.
+ return emitLoad(fn, fn.lookup(obj, false)) // var (address)
+
+ case *ast.SelectorExpr:
+ sel, ok := fn.Pkg.info.Selections[e]
+ if !ok {
+ // qualified identifier
+ return b.expr(fn, e.Sel)
+ }
+ switch sel.Kind() {
+ case types.MethodExpr:
+ // (*T).f or T.f, the method f from the method-set of type T.
+ // The result is a "thunk".
+ return emitConv(fn, makeThunk(fn.Prog, sel), tv.Type)
+
+ case types.MethodVal:
+ // e.f where e is an expression and f is a method.
+ // The result is a "bound".
+ obj := sel.Obj().(*types.Func)
+ rt := recvType(obj)
+ wantAddr := isPointer(rt)
+ escaping := true
+ v := b.receiver(fn, e.X, wantAddr, escaping, sel)
+ if isInterface(rt) {
+ // If v has interface type I,
+ // we must emit a check that v is non-nil.
+ // We use: typeassert v.(I).
+ emitTypeAssert(fn, v, rt, token.NoPos)
+ }
+ c := &MakeClosure{
+ Fn: makeBound(fn.Prog, obj),
+ Bindings: []Value{v},
+ }
+ c.setPos(e.Sel.Pos())
+ c.setType(tv.Type)
+ return fn.emit(c)
+
+ case types.FieldVal:
+ indices := sel.Index()
+ last := len(indices) - 1
+ v := b.expr(fn, e.X)
+ v = emitImplicitSelections(fn, v, indices[:last])
+ v = emitFieldSelection(fn, v, indices[last], false, e.Sel)
+ return v
+ }
+
+ panic("unexpected expression-relative selector")
+
+ case *ast.IndexExpr:
+ switch t := fn.Pkg.typeOf(e.X).Underlying().(type) {
+ case *types.Array:
+ // Non-addressable array (in a register).
+ v := &Index{
+ X: b.expr(fn, e.X),
+ Index: emitConv(fn, b.expr(fn, e.Index), tInt),
+ }
+ v.setPos(e.Lbrack)
+ v.setType(t.Elem())
+ return fn.emit(v)
+
+ case *types.Map:
+ // Maps are not addressable.
+ mapt := fn.Pkg.typeOf(e.X).Underlying().(*types.Map)
+ v := &Lookup{
+ X: b.expr(fn, e.X),
+ Index: emitConv(fn, b.expr(fn, e.Index), mapt.Key()),
+ }
+ v.setPos(e.Lbrack)
+ v.setType(mapt.Elem())
+ return fn.emit(v)
+
+ case *types.Basic: // => string
+ // Strings are not addressable.
+ v := &Lookup{
+ X: b.expr(fn, e.X),
+ Index: b.expr(fn, e.Index),
+ }
+ v.setPos(e.Lbrack)
+ v.setType(tByte)
+ return fn.emit(v)
+
+ case *types.Slice, *types.Pointer: // *array
+ // Addressable slice/array; use IndexAddr and Load.
+ return b.addr(fn, e, false).load(fn)
+
+ default:
+ panic("unexpected container type in IndexExpr: " + t.String())
+ }
+
+ case *ast.CompositeLit, *ast.StarExpr:
+ // Addressable types (lvalues)
+ return b.addr(fn, e, false).load(fn)
+ }
+
+ panic(fmt.Sprintf("unexpected expr: %T", e))
+}
+
+// stmtList emits to fn code for all statements in list.
+func (b *builder) stmtList(fn *Function, list []ast.Stmt) {
+ for _, s := range list {
+ b.stmt(fn, s)
+ }
+}
+
+// receiver emits to fn code for expression e in the "receiver"
+// position of selection e.f (where f may be a field or a method) and
+// returns the effective receiver after applying the implicit field
+// selections of sel.
+//
+// wantAddr requests that the result be an address. If
+// !sel.Indirect(), this may require that e be built in addr() mode; it
+// must thus be addressable.
+//
+// escaping is defined as per builder.addr().
+//
+func (b *builder) receiver(fn *Function, e ast.Expr, wantAddr, escaping bool, sel *types.Selection) Value {
+ var v Value
+ if wantAddr && !sel.Indirect() && !isPointer(fn.Pkg.typeOf(e)) {
+ v = b.addr(fn, e, escaping).address(fn)
+ } else {
+ v = b.expr(fn, e)
+ }
+
+ last := len(sel.Index()) - 1
+ v = emitImplicitSelections(fn, v, sel.Index()[:last])
+ if !wantAddr && isPointer(v.Type()) {
+ v = emitLoad(fn, v)
+ }
+ return v
+}
+
+// setCallFunc populates the function parts of a CallCommon structure
+// (Func, Method, Recv, Args[0]) based on the kind of invocation
+// occurring in e.
+//
+func (b *builder) setCallFunc(fn *Function, e *ast.CallExpr, c *CallCommon) {
+ c.pos = e.Lparen
+
+ // Is this a method call?
+ if selector, ok := unparen(e.Fun).(*ast.SelectorExpr); ok {
+ sel, ok := fn.Pkg.info.Selections[selector]
+ if ok && sel.Kind() == types.MethodVal {
+ obj := sel.Obj().(*types.Func)
+ wantAddr := isPointer(recvType(obj))
+ escaping := true
+ v := b.receiver(fn, selector.X, wantAddr, escaping, sel)
+ if isInterface(deref(v.Type())) {
+ // Invoke-mode call.
+ c.Value = v
+ c.Method = obj
+ } else {
+ // "Call"-mode call.
+ c.Value = fn.Prog.declaredFunc(obj)
+ c.Args = append(c.Args, v)
+ }
+ return
+ }
+
+ // sel.Kind()==MethodExpr indicates T.f() or (*T).f():
+ // a statically dispatched call to the method f in the
+ // method-set of T or *T. T may be an interface.
+ //
+ // e.Fun would evaluate to a concrete method, interface
+ // wrapper function, or promotion wrapper.
+ //
+ // For now, we evaluate it in the usual way.
+ //
+ // TODO(adonovan): opt: inline expr() here, to make the
+ // call static and to avoid generation of wrappers.
+ // It's somewhat tricky as it may consume the first
+ // actual parameter if the call is "invoke" mode.
+ //
+ // Examples:
+ // type T struct{}; func (T) f() {} // "call" mode
+ // type T interface { f() } // "invoke" mode
+ //
+ // type S struct{ T }
+ //
+ // var s S
+ // S.f(s)
+ // (*S).f(&s)
+ //
+ // Suggested approach:
+ // - consume the first actual parameter expression
+ // and build it with b.expr().
+ // - apply implicit field selections.
+ // - use MethodVal logic to populate fields of c.
+ }
+
+ // Evaluate the function operand in the usual way.
+ c.Value = b.expr(fn, e.Fun)
+}
+
+// emitCallArgs emits to fn code for the actual parameters of call e to
+// a (possibly built-in) function of effective type sig.
+// The argument values are appended to args, which is then returned.
+//
+func (b *builder) emitCallArgs(fn *Function, sig *types.Signature, e *ast.CallExpr, args []Value) []Value {
+ // f(x, y, z...): pass slice z straight through.
+ if e.Ellipsis != 0 {
+ for i, arg := range e.Args {
+ v := emitConv(fn, b.expr(fn, arg), sig.Params().At(i).Type())
+ args = append(args, v)
+ }
+ return args
+ }
+
+ offset := len(args) // 1 if call has receiver, 0 otherwise
+
+ // Evaluate actual parameter expressions.
+ //
+ // If this is a chained call of the form f(g()) where g has
+ // multiple return values (MRV), they are flattened out into
+ // args; a suffix of them may end up in a varargs slice.
+ for _, arg := range e.Args {
+ v := b.expr(fn, arg)
+ if ttuple, ok := v.Type().(*types.Tuple); ok { // MRV chain
+ for i, n := 0, ttuple.Len(); i < n; i++ {
+ args = append(args, emitExtract(fn, v, i))
+ }
+ } else {
+ args = append(args, v)
+ }
+ }
+
+ // Actual->formal assignability conversions for normal parameters.
+ np := sig.Params().Len() // number of normal parameters
+ if sig.Variadic() {
+ np--
+ }
+ for i := 0; i < np; i++ {
+ args[offset+i] = emitConv(fn, args[offset+i], sig.Params().At(i).Type())
+ }
+
+ // Actual->formal assignability conversions for variadic parameter,
+ // and construction of slice.
+ if sig.Variadic() {
+ varargs := args[offset+np:]
+ st := sig.Params().At(np).Type().(*types.Slice)
+ vt := st.Elem()
+ if len(varargs) == 0 {
+ args = append(args, nilConst(st))
+ } else {
+ // Replace a suffix of args with a slice containing it.
+ at := types.NewArray(vt, int64(len(varargs)))
+ a := emitNew(fn, at, token.NoPos)
+ a.setPos(e.Rparen)
+ a.Comment = "varargs"
+ for i, arg := range varargs {
+ iaddr := &IndexAddr{
+ X: a,
+ Index: intConst(int64(i)),
+ }
+ iaddr.setType(types.NewPointer(vt))
+ fn.emit(iaddr)
+ emitStore(fn, iaddr, arg)
+ }
+ s := &Slice{X: a}
+ s.setType(st)
+ args[offset+np] = fn.emit(s)
+ args = args[:offset+np+1]
+ }
+ }
+ return args
+}
+
+// setCall emits to fn code to evaluate all the parameters of a function
+// call e, and populates *c with those values.
+//
+func (b *builder) setCall(fn *Function, e *ast.CallExpr, c *CallCommon) {
+ // First deal with the f(...) part and optional receiver.
+ b.setCallFunc(fn, e, c)
+
+ // Then append the other actual parameters.
+ sig, _ := fn.Pkg.typeOf(e.Fun).Underlying().(*types.Signature)
+ if sig == nil {
+ panic(fmt.Sprintf("no signature for call of %s", e.Fun))
+ }
+ c.Args = b.emitCallArgs(fn, sig, e, c.Args)
+}
+
+// assignOp emits to fn code to perform loc += incr or loc -= incr.
+func (b *builder) assignOp(fn *Function, loc lvalue, incr Value, op token.Token) {
+ oldv := loc.load(fn)
+ loc.store(fn, emitArith(fn, op, oldv, emitConv(fn, incr, oldv.Type()), loc.typ(), token.NoPos))
+}
+
+// localValueSpec emits to fn code to define all of the vars in the
+// function-local ValueSpec, spec.
+//
+func (b *builder) localValueSpec(fn *Function, spec *ast.ValueSpec) {
+ switch {
+ case len(spec.Values) == len(spec.Names):
+ // e.g. var x, y = 0, 1
+ // 1:1 assignment
+ for i, id := range spec.Names {
+ if !isBlankIdent(id) {
+ fn.addLocalForIdent(id)
+ }
+ lval := b.addr(fn, id, false) // non-escaping
+ b.exprInPlace(fn, lval, spec.Values[i], true)
+ }
+
+ case len(spec.Values) == 0:
+ // e.g. var x, y int
+ // Locals are implicitly zero-initialized.
+ for _, id := range spec.Names {
+ if !isBlankIdent(id) {
+ lhs := fn.addLocalForIdent(id)
+ if fn.debugInfo() {
+ emitDebugRef(fn, id, lhs, true)
+ }
+ }
+ }
+
+ default:
+ // e.g. var x, y = pos()
+ tuple := b.exprN(fn, spec.Values[0])
+ for i, id := range spec.Names {
+ if !isBlankIdent(id) {
+ fn.addLocalForIdent(id)
+ lhs := b.addr(fn, id, false) // non-escaping
+ lhs.store(fn, emitExtract(fn, tuple, i))
+ }
+ }
+ }
+}
+
+// assignStmt emits code to fn for a parallel assignment of rhss to lhss.
+// isDef is true if this is a short variable declaration (:=).
+//
+// Note the similarity with localValueSpec.
+//
+func (b *builder) assignStmt(fn *Function, lhss, rhss []ast.Expr, isDef bool) {
+ // Side effects of all LHSs and RHSs must occur in left-to-right order.
+ var lvals []lvalue
+ for _, lhs := range lhss {
+ var lval lvalue = blank{}
+ if !isBlankIdent(lhs) {
+ if isDef {
+ if obj := fn.Pkg.info.Defs[lhs.(*ast.Ident)]; obj != nil {
+ fn.addNamedLocal(obj)
+ }
+ }
+ lval = b.addr(fn, lhs, false) // non-escaping
+ }
+ lvals = append(lvals, lval)
+ }
+ if len(lhss) == len(rhss) {
+ // e.g. x, y = f(), g()
+ if len(lhss) == 1 {
+ // x = type{...}
+ // Optimization: in-place construction
+ // of composite literals.
+ b.exprInPlace(fn, lvals[0], rhss[0], false)
+ } else {
+ // Parallel assignment. All reads must occur
+ // before all updates, precluding exprInPlace.
+ var rvals []Value
+ for _, rval := range rhss {
+ rvals = append(rvals, b.expr(fn, rval))
+ }
+ for i, lval := range lvals {
+ lval.store(fn, rvals[i])
+ }
+ }
+ } else {
+ // e.g. x, y = pos()
+ tuple := b.exprN(fn, rhss[0])
+ for i, lval := range lvals {
+ lval.store(fn, emitExtract(fn, tuple, i))
+ }
+ }
+}
+
+// arrayLen returns the length of the array whose composite literal elements are elts.
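+//
+// For example, the elements of [...]int{5, 3: 7, 8} yield a length of
+// 5: the keyed element occupies index 3, the following element index 4,
+// so the maximum index is 4.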
+func (b *builder) arrayLen(fn *Function, elts []ast.Expr) int64 {
+ var max int64 = -1
+ var i int64 = -1
+ for _, e := range elts {
+ if kv, ok := e.(*ast.KeyValueExpr); ok {
+ i = b.expr(fn, kv.Key).(*Const).Int64()
+ } else {
+ i++
+ }
+ if i > max {
+ max = i
+ }
+ }
+ return max + 1
+}
+
+// compLit emits to fn code to initialize a composite literal e at
+// address addr with type typ, typically allocated by Alloc.
+// Nested composite literals are recursively initialized in place
+// where possible. If isZero is true, compLit assumes that addr
+// holds the zero value for typ.
+//
+// A CompositeLit may have pointer type only in the recursive (nested)
+// case when the type name is implicit. e.g. in []*T{{}}, the inner
+// literal has type *T and behaves like &T{}.
+// In that case, addr must hold a T, not a *T.
+//
+func (b *builder) compLit(fn *Function, addr Value, e *ast.CompositeLit, isZero bool) {
+ typ := deref(fn.Pkg.typeOf(e))
+ switch t := typ.Underlying().(type) {
+ case *types.Struct:
+ if !isZero && len(e.Elts) != t.NumFields() {
+ emitMemClear(fn, addr)
+ isZero = true
+ }
+ for i, e := range e.Elts {
+ fieldIndex := i
+ if kv, ok := e.(*ast.KeyValueExpr); ok {
+ fname := kv.Key.(*ast.Ident).Name
+ for i, n := 0, t.NumFields(); i < n; i++ {
+ sf := t.Field(i)
+ if sf.Name() == fname {
+ fieldIndex = i
+ e = kv.Value
+ break
+ }
+ }
+ }
+ sf := t.Field(fieldIndex)
+ faddr := &FieldAddr{
+ X: addr,
+ Field: fieldIndex,
+ }
+ faddr.setType(types.NewPointer(sf.Type()))
+ fn.emit(faddr)
+ b.exprInPlace(fn, &address{addr: faddr, expr: e}, e, isZero)
+ }
+
+ case *types.Array, *types.Slice:
+ var at *types.Array
+ var array Value
+ switch t := t.(type) {
+ case *types.Slice:
+ at = types.NewArray(t.Elem(), b.arrayLen(fn, e.Elts))
+ alloc := emitNew(fn, at, e.Lbrace)
+ alloc.Comment = "slicelit"
+ array = alloc
+ isZero = true
+ case *types.Array:
+ at = t
+ array = addr
+ }
+
+ if !isZero && int64(len(e.Elts)) != at.Len() {
+ emitMemClear(fn, array)
+ isZero = true
+ }
+
+ var idx *Const
+ for _, e := range e.Elts {
+ if kv, ok := e.(*ast.KeyValueExpr); ok {
+ idx = b.expr(fn, kv.Key).(*Const)
+ e = kv.Value
+ } else {
+ var idxval int64
+ if idx != nil {
+ idxval = idx.Int64() + 1
+ }
+ idx = intConst(idxval)
+ }
+ iaddr := &IndexAddr{
+ X: array,
+ Index: idx,
+ }
+ iaddr.setType(types.NewPointer(at.Elem()))
+ fn.emit(iaddr)
+ b.exprInPlace(fn, &address{addr: iaddr, expr: e}, e, isZero)
+ }
+ if t != at { // slice
+ s := &Slice{X: array}
+ s.setPos(e.Lbrace)
+ s.setType(typ)
+ emitStore(fn, addr, fn.emit(s))
+ }
+
+ case *types.Map:
+ m := &MakeMap{Reserve: intConst(int64(len(e.Elts)))}
+ m.setPos(e.Lbrace)
+ m.setType(typ)
+ emitStore(fn, addr, fn.emit(m))
+ for _, e := range e.Elts {
+ e := e.(*ast.KeyValueExpr)
+ loc := &element{
+ m: m,
+ k: emitConv(fn, b.expr(fn, e.Key), t.Key()),
+ t: t.Elem(),
+ pos: e.Colon,
+ }
+ b.exprInPlace(fn, loc, e.Value, true)
+ }
+
+ default:
+ panic("unexpected CompositeLit type: " + t.String())
+ }
+}
+
+// switchStmt emits to fn code for the switch statement s, optionally
+// labelled by label.
+//
+func (b *builder) switchStmt(fn *Function, s *ast.SwitchStmt, label *lblock) {
+ // We treat SwitchStmt like a sequential if-else chain.
+ // Multiway dispatch can be recovered later by ssautil.Switches()
+ // to those cases that are free of side effects.
+ if s.Init != nil {
+ b.stmt(fn, s.Init)
+ }
+ var tag Value = vTrue
+ if s.Tag != nil {
+ tag = b.expr(fn, s.Tag)
+ }
+ done := fn.newBasicBlock("switch.done")
+ if label != nil {
+ label._break = done
+ }
+ // We pull the default case (if present) down to the end.
+ // But each fallthrough label must point to the next
+ // body block in source order, so we preallocate a
+ // body block (fallthru) for the next case.
+ // Unfortunately this makes for a confusing block order.
+ var dfltBody *[]ast.Stmt
+ var dfltFallthrough *BasicBlock
+ var fallthru, dfltBlock *BasicBlock
+ ncases := len(s.Body.List)
+ for i, clause := range s.Body.List {
+ body := fallthru
+ if body == nil {
+ body = fn.newBasicBlock("switch.body") // first case only
+ }
+
+ // Preallocate body block for the next case.
+ fallthru = done
+ if i+1 < ncases {
+ fallthru = fn.newBasicBlock("switch.body")
+ }
+
+ cc := clause.(*ast.CaseClause)
+ if cc.List == nil {
+ // Default case.
+ dfltBody = &cc.Body
+ dfltFallthrough = fallthru
+ dfltBlock = body
+ continue
+ }
+
+ var nextCond *BasicBlock
+ for _, cond := range cc.List {
+ nextCond = fn.newBasicBlock("switch.next")
+ // TODO(adonovan): opt: when tag==vTrue, we'd
+ // get better code if we use b.cond(cond)
+ // instead of BinOp(EQL, tag, b.expr(cond))
+ // followed by If. Don't forget conversions
+ // though.
+ cond := emitCompare(fn, token.EQL, tag, b.expr(fn, cond), token.NoPos)
+ emitIf(fn, cond, body, nextCond)
+ fn.currentBlock = nextCond
+ }
+ fn.currentBlock = body
+ fn.targets = &targets{
+ tail: fn.targets,
+ _break: done,
+ _fallthrough: fallthru,
+ }
+ b.stmtList(fn, cc.Body)
+ fn.targets = fn.targets.tail
+ emitJump(fn, done)
+ fn.currentBlock = nextCond
+ }
+ if dfltBlock != nil {
+ emitJump(fn, dfltBlock)
+ fn.currentBlock = dfltBlock
+ fn.targets = &targets{
+ tail: fn.targets,
+ _break: done,
+ _fallthrough: dfltFallthrough,
+ }
+ b.stmtList(fn, *dfltBody)
+ fn.targets = fn.targets.tail
+ }
+ emitJump(fn, done)
+ fn.currentBlock = done
+}
+
+// typeSwitchStmt emits to fn code for the type switch statement s, optionally
+// labelled by label.
+//
+func (b *builder) typeSwitchStmt(fn *Function, s *ast.TypeSwitchStmt, label *lblock) {
+ // We treat TypeSwitchStmt like a sequential if-else chain.
+ // Multiway dispatch can be recovered later by ssautil.Switches().
+
+ // Typeswitch lowering:
+ //
+ // var x X
+ // switch y := x.(type) {
+ // case T1, T2: S1 // >1 (y := x)
+ // case nil: SN // nil (y := x)
+ // default: SD // 0 types (y := x)
+ // case T3: S3 // 1 type (y := x.(T3))
+ // }
+ //
+ // ...s.Init...
+ // x := eval x
+ // .caseT1:
+ // t1, ok1 := typeswitch,ok x
+ // if ok1 then goto S1 else goto .caseT2
+ // .caseT2:
+ // t2, ok2 := typeswitch,ok x
+ // if ok2 then goto S1 else goto .caseNil
+ // .S1:
+ // y := x
+ // ...S1...
+ // goto done
+ // .caseNil:
+ // if x == nil then goto SN else goto .caseT3
+ // .SN:
+ // y := x
+ // ...SN...
+ // goto done
+ // .caseT3:
+ // t3, ok3 := typeswitch,ok x
+ // if ok3 then goto S3 else goto .default
+ // .S3:
+ // y := t3
+ // ...S3...
+ // goto done
+ // .default:
+ // y := x
+ // ...SD...
+ // goto done
+ // .done:
+
+ if s.Init != nil {
+ b.stmt(fn, s.Init)
+ }
+
+ var x Value
+ switch ass := s.Assign.(type) {
+ case *ast.ExprStmt: // x.(type)
+ x = b.expr(fn, unparen(ass.X).(*ast.TypeAssertExpr).X)
+ case *ast.AssignStmt: // y := x.(type)
+ x = b.expr(fn, unparen(ass.Rhs[0]).(*ast.TypeAssertExpr).X)
+ }
+
+ done := fn.newBasicBlock("typeswitch.done")
+ if label != nil {
+ label._break = done
+ }
+ var default_ *ast.CaseClause
+ for _, clause := range s.Body.List {
+ cc := clause.(*ast.CaseClause)
+ if cc.List == nil {
+ default_ = cc
+ continue
+ }
+ body := fn.newBasicBlock("typeswitch.body")
+ var next *BasicBlock
+ var casetype types.Type
+ var ti Value // ti, ok := typeassert,ok x
+ for _, cond := range cc.List {
+ next = fn.newBasicBlock("typeswitch.next")
+ casetype = fn.Pkg.typeOf(cond)
+ var condv Value
+ if casetype == tUntypedNil {
+ condv = emitCompare(fn, token.EQL, x, nilConst(x.Type()), token.NoPos)
+ ti = x
+ } else {
+ yok := emitTypeTest(fn, x, casetype, cc.Case)
+ ti = emitExtract(fn, yok, 0)
+ condv = emitExtract(fn, yok, 1)
+ }
+ emitIf(fn, condv, body, next)
+ fn.currentBlock = next
+ }
+ if len(cc.List) != 1 {
+ ti = x
+ }
+ fn.currentBlock = body
+ b.typeCaseBody(fn, cc, ti, done)
+ fn.currentBlock = next
+ }
+ if default_ != nil {
+ b.typeCaseBody(fn, default_, x, done)
+ } else {
+ emitJump(fn, done)
+ }
+ fn.currentBlock = done
+}
+
+func (b *builder) typeCaseBody(fn *Function, cc *ast.CaseClause, x Value, done *BasicBlock) {
+ if obj := fn.Pkg.info.Implicits[cc]; obj != nil {
+ // In a switch y := x.(type), each case clause
+ // implicitly declares a distinct object y.
+ // In a single-type case, y has that type.
+ // In multi-type cases, 'case nil' and default,
+ // y has the same type as the interface operand.
+ emitStore(fn, fn.addNamedLocal(obj), x)
+ }
+ fn.targets = &targets{
+ tail: fn.targets,
+ _break: done,
+ }
+ b.stmtList(fn, cc.Body)
+ fn.targets = fn.targets.tail
+ emitJump(fn, done)
+}
+
+// selectStmt emits to fn code for the select statement s, optionally
+// labelled by label.
+//
+func (b *builder) selectStmt(fn *Function, s *ast.SelectStmt, label *lblock) {
+ // A blocking select of a single case degenerates to a
+ // simple send or receive.
+ // TODO(adonovan): opt: is this optimization worth its weight?
+ if len(s.Body.List) == 1 {
+ clause := s.Body.List[0].(*ast.CommClause)
+ if clause.Comm != nil {
+ b.stmt(fn, clause.Comm)
+ done := fn.newBasicBlock("select.done")
+ if label != nil {
+ label._break = done
+ }
+ fn.targets = &targets{
+ tail: fn.targets,
+ _break: done,
+ }
+ b.stmtList(fn, clause.Body)
+ fn.targets = fn.targets.tail
+ emitJump(fn, done)
+ fn.currentBlock = done
+ return
+ }
+ }
+
+ // First evaluate all channels in all cases, and find
+ // the directions of each state.
+ var states []*SelectState
+ blocking := true
+ debugInfo := fn.debugInfo()
+ for _, clause := range s.Body.List {
+ var st *SelectState
+ switch comm := clause.(*ast.CommClause).Comm.(type) {
+ case nil: // default case
+ blocking = false
+ continue
+
+ case *ast.SendStmt: // ch<- i
+ ch := b.expr(fn, comm.Chan)
+ st = &SelectState{
+ Dir: types.SendOnly,
+ Chan: ch,
+ Send: emitConv(fn, b.expr(fn, comm.Value),
+ ch.Type().Underlying().(*types.Chan).Elem()),
+ Pos: comm.Arrow,
+ }
+ if debugInfo {
+ st.DebugNode = comm
+ }
+
+ case *ast.AssignStmt: // x := <-ch
+ recv := unparen(comm.Rhs[0]).(*ast.UnaryExpr)
+ st = &SelectState{
+ Dir: types.RecvOnly,
+ Chan: b.expr(fn, recv.X),
+ Pos: recv.OpPos,
+ }
+ if debugInfo {
+ st.DebugNode = recv
+ }
+
+ case *ast.ExprStmt: // <-ch
+ recv := unparen(comm.X).(*ast.UnaryExpr)
+ st = &SelectState{
+ Dir: types.RecvOnly,
+ Chan: b.expr(fn, recv.X),
+ Pos: recv.OpPos,
+ }
+ if debugInfo {
+ st.DebugNode = recv
+ }
+ }
+ states = append(states, st)
+ }
+
+ // We dispatch on the (fair) result of Select using a
+ // sequential if-else chain, in effect:
+ //
+ // idx, recvOk, r0...r_n-1 := select(...)
+ // if idx == 0 { // receive on channel 0 (first receive => r0)
+ // x, ok := r0, recvOk
+ // ...state0...
+ // } else if idx == 1 { // send on channel 1
+ // ...state1...
+ // } else {
+ // ...default...
+ // }
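+ //
+ // For illustration (not generated code), given
+ //
+ // select {
+ // case x := <-c1: ... // c1 has type chan string
+ // case c2 <- y: ...
+ // default: ...
+ // }
+ //
+ // the Select instruction yields a tuple (idx, recvOk, r0), with one
+ // r value per receive case (here r0 has type string), and Blocking
+ // is false because of the default clause.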
+ sel := &Select{
+ States: states,
+ Blocking: blocking,
+ }
+ sel.setPos(s.Select)
+ var vars []*types.Var
+ vars = append(vars, varIndex, varOk)
+ for _, st := range states {
+ if st.Dir == types.RecvOnly {
+ tElem := st.Chan.Type().Underlying().(*types.Chan).Elem()
+ vars = append(vars, anonVar(tElem))
+ }
+ }
+ sel.setType(types.NewTuple(vars...))
+
+ fn.emit(sel)
+ idx := emitExtract(fn, sel, 0)
+
+ done := fn.newBasicBlock("select.done")
+ if label != nil {
+ label._break = done
+ }
+
+ var defaultBody *[]ast.Stmt
+ state := 0
+ r := 2 // index in 'sel' tuple of the next received value; increments if st.Dir==types.RecvOnly
+ for _, cc := range s.Body.List {
+ clause := cc.(*ast.CommClause)
+ if clause.Comm == nil {
+ defaultBody = &clause.Body
+ continue
+ }
+ body := fn.newBasicBlock("select.body")
+ next := fn.newBasicBlock("select.next")
+ emitIf(fn, emitCompare(fn, token.EQL, idx, intConst(int64(state)), token.NoPos), body, next)
+ fn.currentBlock = body
+ fn.targets = &targets{
+ tail: fn.targets,
+ _break: done,
+ }
+ switch comm := clause.Comm.(type) {
+ case *ast.ExprStmt: // <-ch
+ if debugInfo {
+ v := emitExtract(fn, sel, r)
+ emitDebugRef(fn, states[state].DebugNode.(ast.Expr), v, false)
+ }
+ r++
+
+ case *ast.AssignStmt: // x := <-states[state].Chan
+ if comm.Tok == token.DEFINE {
+ fn.addLocalForIdent(comm.Lhs[0].(*ast.Ident))
+ }
+ x := b.addr(fn, comm.Lhs[0], false) // non-escaping
+ v := emitExtract(fn, sel, r)
+ if debugInfo {
+ emitDebugRef(fn, states[state].DebugNode.(ast.Expr), v, false)
+ }
+ x.store(fn, v)
+
+ if len(comm.Lhs) == 2 { // x, ok := ...
+ if comm.Tok == token.DEFINE {
+ fn.addLocalForIdent(comm.Lhs[1].(*ast.Ident))
+ }
+ ok := b.addr(fn, comm.Lhs[1], false) // non-escaping
+ ok.store(fn, emitExtract(fn, sel, 1))
+ }
+ r++
+ }
+ b.stmtList(fn, clause.Body)
+ fn.targets = fn.targets.tail
+ emitJump(fn, done)
+ fn.currentBlock = next
+ state++
+ }
+ if defaultBody != nil {
+ fn.targets = &targets{
+ tail: fn.targets,
+ _break: done,
+ }
+ b.stmtList(fn, *defaultBody)
+ fn.targets = fn.targets.tail
+ } else {
+ // A blocking select must match some case.
+ // (This should really be a runtime.errorString, not a string.)
+ fn.emit(&Panic{
+ X: emitConv(fn, stringConst("blocking select matched no case"), tEface),
+ })
+ fn.currentBlock = fn.newBasicBlock("unreachable")
+ }
+ emitJump(fn, done)
+ fn.currentBlock = done
+}
+
+// forStmt emits to fn code for the for statement s, optionally
+// labelled by label.
+//
+func (b *builder) forStmt(fn *Function, s *ast.ForStmt, label *lblock) {
+ // ...init...
+ // jump loop
+ // loop:
+ // if cond goto body else done
+ // body:
+ // ...body...
+ // jump post
+ // post: (target of continue)
+ // ...post...
+ // jump loop
+ // done: (target of break)
+ if s.Init != nil {
+ b.stmt(fn, s.Init)
+ }
+ body := fn.newBasicBlock("for.body")
+ done := fn.newBasicBlock("for.done") // target of 'break'
+ loop := body // target of back-edge
+ if s.Cond != nil {
+ loop = fn.newBasicBlock("for.loop")
+ }
+ cont := loop // target of 'continue'
+ if s.Post != nil {
+ cont = fn.newBasicBlock("for.post")
+ }
+ if label != nil {
+ label._break = done
+ label._continue = cont
+ }
+ emitJump(fn, loop)
+ fn.currentBlock = loop
+ if loop != body {
+ b.cond(fn, s.Cond, body, done)
+ fn.currentBlock = body
+ }
+ fn.targets = &targets{
+ tail: fn.targets,
+ _break: done,
+ _continue: cont,
+ }
+ b.stmt(fn, s.Body)
+ fn.targets = fn.targets.tail
+ emitJump(fn, cont)
+
+ if s.Post != nil {
+ fn.currentBlock = cont
+ b.stmt(fn, s.Post)
+ emitJump(fn, loop) // back-edge
+ }
+ fn.currentBlock = done
+}
+
+// rangeIndexed emits to fn the header for an integer-indexed loop
+// over array, *array or slice value x.
+// The v result is defined only if tv is non-nil.
+//
+func (b *builder) rangeIndexed(fn *Function, x Value, tv types.Type) (k, v Value, loop, done *BasicBlock) {
+ //
+ // length = len(x)
+ // index = -1
+ // loop: (target of continue)
+ // index++
+ // if index < length goto body else done
+ // body:
+ // k = index
+ // v = x[index]
+ // ...body...
+ // jump loop
+ // done: (target of break)
+
+ // Determine number of iterations.
+ var length Value
+ if arr, ok := deref(x.Type()).Underlying().(*types.Array); ok {
+ // For array or *array, the number of iterations is
+ // known statically thanks to the type. We avoid a
+ // data dependence upon x, permitting later dead-code
+ // elimination if x is pure, static unrolling, etc.
+ // Ranging over a nil *array may have >0 iterations.
+ // We still generate code for x, in case it has effects.
+ length = intConst(arr.Len())
+ } else {
+ // length = len(x).
+ var c Call
+ c.Call.Value = makeLen(x.Type())
+ c.Call.Args = []Value{x}
+ c.setType(tInt)
+ length = fn.emit(&c)
+ }
+
+ index := fn.addLocal(tInt, token.NoPos)
+ emitStore(fn, index, intConst(-1))
+
+ loop = fn.newBasicBlock("rangeindex.loop")
+ emitJump(fn, loop)
+ fn.currentBlock = loop
+
+ incr := &BinOp{
+ Op: token.ADD,
+ X: emitLoad(fn, index),
+ Y: vOne,
+ }
+ incr.setType(tInt)
+ emitStore(fn, index, fn.emit(incr))
+
+ body := fn.newBasicBlock("rangeindex.body")
+ done = fn.newBasicBlock("rangeindex.done")
+ emitIf(fn, emitCompare(fn, token.LSS, incr, length, token.NoPos), body, done)
+ fn.currentBlock = body
+
+ k = emitLoad(fn, index)
+ if tv != nil {
+ switch t := x.Type().Underlying().(type) {
+ case *types.Array:
+ instr := &Index{
+ X: x,
+ Index: k,
+ }
+ instr.setType(t.Elem())
+ v = fn.emit(instr)
+
+ case *types.Pointer: // *array
+ instr := &IndexAddr{
+ X: x,
+ Index: k,
+ }
+ instr.setType(types.NewPointer(t.Elem().Underlying().(*types.Array).Elem()))
+ v = emitLoad(fn, fn.emit(instr))
+
+ case *types.Slice:
+ instr := &IndexAddr{
+ X: x,
+ Index: k,
+ }
+ instr.setType(types.NewPointer(t.Elem()))
+ v = emitLoad(fn, fn.emit(instr))
+
+ default:
+ panic("rangeIndexed x:" + t.String())
+ }
+ }
+ return
+}
+
+// rangeIter emits to fn the header for a loop using
+// Range/Next/Extract to iterate over map or string value x.
+// tk and tv are the types of the key/value results k and v, or nil
+// if the respective component is not wanted.
+//
+func (b *builder) rangeIter(fn *Function, x Value, tk, tv types.Type, pos token.Pos) (k, v Value, loop, done *BasicBlock) {
+ //
+ // it = range x
+ // loop: (target of continue)
+ // okv = next it (ok, key, value)
+ // ok = extract okv #0
+ // if ok goto body else done
+ // body:
+ // k = extract okv #1
+ // v = extract okv #2
+ // ...body...
+ // jump loop
+ // done: (target of break)
+ //
+
+ if tk == nil {
+ tk = tInvalid
+ }
+ if tv == nil {
+ tv = tInvalid
+ }
+
+ rng := &Range{X: x}
+ rng.setPos(pos)
+ rng.setType(tRangeIter)
+ it := fn.emit(rng)
+
+ loop = fn.newBasicBlock("rangeiter.loop")
+ emitJump(fn, loop)
+ fn.currentBlock = loop
+
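+ // Of the types handled by rangeIter (map and string), only a
+ // string has a *types.Basic underlying type, so this detects
+ // ranging over a string.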
+ _, isString := x.Type().Underlying().(*types.Basic)
+
+ okv := &Next{
+ Iter: it,
+ IsString: isString,
+ }
+ okv.setType(types.NewTuple(
+ varOk,
+ newVar("k", tk),
+ newVar("v", tv),
+ ))
+ fn.emit(okv)
+
+ body := fn.newBasicBlock("rangeiter.body")
+ done = fn.newBasicBlock("rangeiter.done")
+ emitIf(fn, emitExtract(fn, okv, 0), body, done)
+ fn.currentBlock = body
+
+ if tk != tInvalid {
+ k = emitExtract(fn, okv, 1)
+ }
+ if tv != tInvalid {
+ v = emitExtract(fn, okv, 2)
+ }
+ return
+}
+
+// rangeChan emits to fn the header for a loop that receives from
+// channel x until it fails.
+// tk is the channel's element type, or nil if the k result is
+// not wanted.
+// pos is the position of the '=' or ':=' token.
+//
+func (b *builder) rangeChan(fn *Function, x Value, tk types.Type, pos token.Pos) (k Value, loop, done *BasicBlock) {
+ //
+ // loop: (target of continue)
+ // ko = <-x (key, ok)
+ // ok = extract ko #1
+ // if ok goto body else done
+ // body:
+ // k = extract ko #0
+ // ...
+ // goto loop
+ // done: (target of break)
+
+ loop = fn.newBasicBlock("rangechan.loop")
+ emitJump(fn, loop)
+ fn.currentBlock = loop
+ recv := &UnOp{
+ Op: token.ARROW,
+ X: x,
+ CommaOk: true,
+ }
+ recv.setPos(pos)
+ recv.setType(types.NewTuple(
+ newVar("k", x.Type().Underlying().(*types.Chan).Elem()),
+ varOk,
+ ))
+ ko := fn.emit(recv)
+ body := fn.newBasicBlock("rangechan.body")
+ done = fn.newBasicBlock("rangechan.done")
+ emitIf(fn, emitExtract(fn, ko, 1), body, done)
+ fn.currentBlock = body
+ if tk != nil {
+ k = emitExtract(fn, ko, 0)
+ }
+ return
+}
+
+// rangeStmt emits to fn code for the range statement s, optionally
+// labelled by label.
+//
+func (b *builder) rangeStmt(fn *Function, s *ast.RangeStmt, label *lblock) {
+ var tk, tv types.Type
+ if s.Key != nil && !isBlankIdent(s.Key) {
+ tk = fn.Pkg.typeOf(s.Key)
+ }
+ if s.Value != nil && !isBlankIdent(s.Value) {
+ tv = fn.Pkg.typeOf(s.Value)
+ }
+
+ // If iteration variables are defined (:=), this
+ // occurs once outside the loop.
+ //
+ // Unlike a short variable declaration, a RangeStmt
+ // using := never redeclares an existing variable; it
+ // always creates a new one.
+ if s.Tok == token.DEFINE {
+ if tk != nil {
+ fn.addLocalForIdent(s.Key.(*ast.Ident))
+ }
+ if tv != nil {
+ fn.addLocalForIdent(s.Value.(*ast.Ident))
+ }
+ }
+
+ x := b.expr(fn, s.X)
+
+ var k, v Value
+ var loop, done *BasicBlock
+ switch rt := x.Type().Underlying().(type) {
+ case *types.Slice, *types.Array, *types.Pointer: // *array
+ k, v, loop, done = b.rangeIndexed(fn, x, tv)
+
+ case *types.Chan:
+ k, loop, done = b.rangeChan(fn, x, tk, s.TokPos)
+
+ case *types.Map, *types.Basic: // string
+ k, v, loop, done = b.rangeIter(fn, x, tk, tv, s.For)
+
+ default:
+ panic("Cannot range over: " + rt.String())
+ }
+
+ // Evaluate both LHS expressions before we update either.
+ var kl, vl lvalue
+ if tk != nil {
+ kl = b.addr(fn, s.Key, false) // non-escaping
+ }
+ if tv != nil {
+ vl = b.addr(fn, s.Value, false) // non-escaping
+ }
+ if tk != nil {
+ kl.store(fn, k)
+ }
+ if tv != nil {
+ vl.store(fn, v)
+ }
+
+ if label != nil {
+ label._break = done
+ label._continue = loop
+ }
+
+ fn.targets = &targets{
+ tail: fn.targets,
+ _break: done,
+ _continue: loop,
+ }
+ b.stmt(fn, s.Body)
+ fn.targets = fn.targets.tail
+ emitJump(fn, loop) // back-edge
+ fn.currentBlock = done
+}
+
+// stmt lowers statement s to SSA form, emitting code to fn.
+func (b *builder) stmt(fn *Function, _s ast.Stmt) {
+ // The label of the current statement. If non-nil, its _goto
+ // target is always set; its _break and _continue are set only
+ // within the body of switch/typeswitch/select/for/range.
+ // It is effectively an additional default-nil parameter of stmt().
+ var label *lblock
+start:
+ switch s := _s.(type) {
+ case *ast.EmptyStmt:
+ // ignore. (Usually removed by gofmt.)
+
+ case *ast.DeclStmt: // Con, Var or Typ
+ d := s.Decl.(*ast.GenDecl)
+ if d.Tok == token.VAR {
+ for _, spec := range d.Specs {
+ if vs, ok := spec.(*ast.ValueSpec); ok {
+ b.localValueSpec(fn, vs)
+ }
+ }
+ }
+
+ case *ast.LabeledStmt:
+ label = fn.labelledBlock(s.Label)
+ emitJump(fn, label._goto)
+ fn.currentBlock = label._goto
+ _s = s.Stmt
+ goto start // effectively: tailcall stmt(fn, s.Stmt, label)
+
+ case *ast.ExprStmt:
+ b.expr(fn, s.X)
+
+ case *ast.SendStmt:
+ fn.emit(&Send{
+ Chan: b.expr(fn, s.Chan),
+ X: emitConv(fn, b.expr(fn, s.Value),
+ fn.Pkg.typeOf(s.Chan).Underlying().(*types.Chan).Elem()),
+ pos: s.Arrow,
+ })
+
+ case *ast.IncDecStmt:
+ op := token.ADD
+ if s.Tok == token.DEC {
+ op = token.SUB
+ }
+ loc := b.addr(fn, s.X, false)
+ b.assignOp(fn, loc, NewConst(exact.MakeInt64(1), loc.typ()), op)
+
+ case *ast.AssignStmt:
+ switch s.Tok {
+ case token.ASSIGN, token.DEFINE:
+ b.assignStmt(fn, s.Lhs, s.Rhs, s.Tok == token.DEFINE)
+
+ default: // +=, etc.
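+ // The assignment-operator tokens (ADD_ASSIGN, SUB_ASSIGN, ...)
+ // are declared in go/token in the same order as the corresponding
+ // binary operators (ADD, SUB, ...), so this arithmetic maps
+ // += to ADD, -= to SUB, and so on.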
+ op := s.Tok + token.ADD - token.ADD_ASSIGN
+ b.assignOp(fn, b.addr(fn, s.Lhs[0], false), b.expr(fn, s.Rhs[0]), op)
+ }
+
+ case *ast.GoStmt:
+ // The "intrinsics" new/make/len/cap are forbidden here.
+ // panic is treated like an ordinary function call.
+ v := Go{pos: s.Go}
+ b.setCall(fn, s.Call, &v.Call)
+ fn.emit(&v)
+
+ case *ast.DeferStmt:
+ // The "intrinsics" new/make/len/cap are forbidden here.
+ // panic is treated like an ordinary function call.
+ v := Defer{pos: s.Defer}
+ b.setCall(fn, s.Call, &v.Call)
+ fn.emit(&v)
+
+ // A deferred call can cause recovery from panic,
+ // and control resumes at the Recover block.
+ createRecoverBlock(fn)
+
+ case *ast.ReturnStmt:
+ var results []Value
+ if len(s.Results) == 1 && fn.Signature.Results().Len() > 1 {
+ // Return of one expression in a multi-valued function.
+ tuple := b.exprN(fn, s.Results[0])
+ ttuple := tuple.Type().(*types.Tuple)
+ for i, n := 0, ttuple.Len(); i < n; i++ {
+ results = append(results,
+ emitConv(fn, emitExtract(fn, tuple, i),
+ fn.Signature.Results().At(i).Type()))
+ }
+ } else {
+ // 1:1 return, or no-arg return in non-void function.
+ for i, r := range s.Results {
+ v := emitConv(fn, b.expr(fn, r), fn.Signature.Results().At(i).Type())
+ results = append(results, v)
+ }
+ }
+ if fn.namedResults != nil {
+ // Function has named result parameters (NRPs).
+ // Perform parallel assignment of return operands to NRPs.
+ for i, r := range results {
+ emitStore(fn, fn.namedResults[i], r)
+ }
+ }
+ // Run function calls deferred in this
+ // function when explicitly returning from it.
+ fn.emit(new(RunDefers))
+ if fn.namedResults != nil {
+ // Reload NRPs to form the result tuple.
+ results = results[:0]
+ for _, r := range fn.namedResults {
+ results = append(results, emitLoad(fn, r))
+ }
+ }
+ fn.emit(&Return{Results: results, pos: s.Return})
+ fn.currentBlock = fn.newBasicBlock("unreachable")
+
+ case *ast.BranchStmt:
+ var block *BasicBlock
+ switch s.Tok {
+ case token.BREAK:
+ if s.Label != nil {
+ block = fn.labelledBlock(s.Label)._break
+ } else {
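+ // Unlabelled break: search the chain of enclosing
+ // statements, innermost first, for one that supplies
+ // a break target.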
+ for t := fn.targets; t != nil && block == nil; t = t.tail {
+ block = t._break
+ }
+ }
+
+ case token.CONTINUE:
+ if s.Label != nil {
+ block = fn.labelledBlock(s.Label)._continue
+ } else {
+ for t := fn.targets; t != nil && block == nil; t = t.tail {
+ block = t._continue
+ }
+ }
+
+ case token.FALLTHROUGH:
+ for t := fn.targets; t != nil && block == nil; t = t.tail {
+ block = t._fallthrough
+ }
+
+ case token.GOTO:
+ block = fn.labelledBlock(s.Label)._goto
+ }
+ emitJump(fn, block)
+ fn.currentBlock = fn.newBasicBlock("unreachable")
+
+ case *ast.BlockStmt:
+ b.stmtList(fn, s.List)
+
+ case *ast.IfStmt:
+ if s.Init != nil {
+ b.stmt(fn, s.Init)
+ }
+ then := fn.newBasicBlock("if.then")
+ done := fn.newBasicBlock("if.done")
+ els := done
+ if s.Else != nil {
+ els = fn.newBasicBlock("if.else")
+ }
+ b.cond(fn, s.Cond, then, els)
+ fn.currentBlock = then
+ b.stmt(fn, s.Body)
+ emitJump(fn, done)
+
+ if s.Else != nil {
+ fn.currentBlock = els
+ b.stmt(fn, s.Else)
+ emitJump(fn, done)
+ }
+
+ fn.currentBlock = done
+
+ case *ast.SwitchStmt:
+ b.switchStmt(fn, s, label)
+
+ case *ast.TypeSwitchStmt:
+ b.typeSwitchStmt(fn, s, label)
+
+ case *ast.SelectStmt:
+ b.selectStmt(fn, s, label)
+
+ case *ast.ForStmt:
+ b.forStmt(fn, s, label)
+
+ case *ast.RangeStmt:
+ b.rangeStmt(fn, s, label)
+
+ default:
+ panic(fmt.Sprintf("unexpected statement kind: %T", s))
+ }
+}
+
+// buildFunction builds SSA code for the body of function fn. Idempotent.
+func (b *builder) buildFunction(fn *Function) {
+ if fn.Blocks != nil {
+ return // building already started
+ }
+
+ var recvField *ast.FieldList
+ var body *ast.BlockStmt
+ var functype *ast.FuncType
+ switch n := fn.syntax.(type) {
+ case nil:
+ return // not a Go source function. (Synthetic, or from object file.)
+ case *ast.FuncDecl:
+ functype = n.Type
+ recvField = n.Recv
+ body = n.Body
+ case *ast.FuncLit:
+ functype = n.Type
+ body = n.Body
+ default:
+ panic(n)
+ }
+
+ if body == nil {
+ // External function.
+ if fn.Params == nil {
+ // This condition ensures we add a non-empty
+ // params list once only, but we may attempt
+ // the degenerate empty case repeatedly.
+ // TODO(adonovan): opt: don't do that.
+
+ // We set Function.Params even though there is no body
+ // code to reference them. This simplifies clients.
+ if recv := fn.Signature.Recv(); recv != nil {
+ fn.addParamObj(recv)
+ }
+ params := fn.Signature.Params()
+ for i, n := 0, params.Len(); i < n; i++ {
+ fn.addParamObj(params.At(i))
+ }
+ }
+ return
+ }
+ if fn.Prog.mode&LogSource != 0 {
+ defer logStack("build function %s @ %s", fn, fn.Prog.Fset.Position(fn.pos))()
+ }
+ fn.startBody()
+ fn.createSyntacticParams(recvField, functype)
+ b.stmt(fn, body)
+ if cb := fn.currentBlock; cb != nil && (cb == fn.Blocks[0] || cb == fn.Recover || cb.Preds != nil) {
+ // Control fell off the end of the function's body block.
+ //
+ // Block optimizations eliminate the current block, if
+ // unreachable. It is a builder invariant that
+ // if this no-arg return is ill-typed for
+ // fn.Signature.Results, this block must be
+ // unreachable. The sanity checker checks this.
+ fn.emit(new(RunDefers))
+ fn.emit(new(Return))
+ }
+ fn.finishBody()
+}
+
+// buildFuncDecl builds SSA code for the function or method declared
+// by decl in package pkg.
+//
+func (b *builder) buildFuncDecl(pkg *Package, decl *ast.FuncDecl) {
+ id := decl.Name
+ if isBlankIdent(id) {
+ return // discard
+ }
+ var fn *Function
+ if decl.Recv == nil && id.Name == "init" {
+ pkg.ninit++
+ fn = &Function{
+ name: fmt.Sprintf("init#%d", pkg.ninit),
+ Signature: new(types.Signature),
+ pos: decl.Name.NamePos,
+ Pkg: pkg,
+ Prog: pkg.Prog,
+ syntax: decl,
+ }
+
+ var v Call
+ v.Call.Value = fn
+ v.setType(types.NewTuple())
+ pkg.init.emit(&v)
+ } else {
+ fn = pkg.values[pkg.info.Defs[id]].(*Function)
+ }
+ b.buildFunction(fn)
+}
+
+// BuildAll calls Package.Build() for each package in prog.
+// Building occurs in parallel unless the BuildSerially mode flag was set.
+//
+// BuildAll is idempotent and thread-safe.
+//
+func (prog *Program) BuildAll() {
+ var wg sync.WaitGroup
+ for _, p := range prog.packages {
+ if prog.mode&BuildSerially != 0 {
+ p.Build()
+ } else {
+ wg.Add(1)
+ go func(p *Package) {
+ p.Build()
+ wg.Done()
+ }(p)
+ }
+ }
+ wg.Wait()
+}
+
+// Build builds SSA code for all functions and vars in package p.
+//
+// Precondition: CreatePackage must have been called for all of p's
+// direct imports (and hence its direct imports must have been
+// error-free).
+//
+// Build is idempotent and thread-safe.
+//
+func (p *Package) Build() {
+ if !atomic.CompareAndSwapInt32(&p.started, 0, 1) {
+ return // already started
+ }
+ if p.info == nil {
+ return // synthetic package, e.g. "testmain"
+ }
+ if len(p.info.Files) == 0 {
+ p.info = nil
+ return // package loaded from export data
+ }
+
+ // Ensure we have runtime type info for all exported members.
+ // TODO(adonovan): ideally belongs in memberFromObject, but
+ // that would require package creation in topological order.
+ for name, mem := range p.Members {
+ if ast.IsExported(name) {
+ p.needMethodsOf(mem.Type())
+ }
+ }
+ if p.Prog.mode&LogSource != 0 {
+ defer logStack("build %s", p)()
+ }
+ init := p.init
+ init.startBody()
+
+ var done *BasicBlock
+
+ if p.Prog.mode&BareInits == 0 {
+ // Make init() skip if package is already initialized.
+ initguard := p.Var("init$guard")
+ doinit := init.newBasicBlock("init.start")
+ done = init.newBasicBlock("init.done")
+ emitIf(init, emitLoad(init, initguard), done, doinit)
+ init.currentBlock = doinit
+ emitStore(init, initguard, vTrue)
+
+ // Call the init() function of each package we import.
+ for _, pkg := range p.info.Pkg.Imports() {
+ prereq := p.Prog.packages[pkg]
+ if prereq == nil {
+ panic(fmt.Sprintf("Package(%q).Build(): unsatisfied import: Program.CreatePackage(%q) was not called", p.Object.Path(), pkg.Path()))
+ }
+ var v Call
+ v.Call.Value = prereq.init
+ v.Call.pos = init.pos
+ v.setType(types.NewTuple())
+ init.emit(&v)
+ }
+ }
+
+ var b builder
+
+ // Initialize package-level vars in correct order.
+ for _, varinit := range p.info.InitOrder {
+ if init.Prog.mode&LogSource != 0 {
+ fmt.Fprintf(os.Stderr, "build global initializer %v @ %s\n",
+ varinit.Lhs, p.Prog.Fset.Position(varinit.Rhs.Pos()))
+ }
+ if len(varinit.Lhs) == 1 {
+ // 1:1 initialization: var x, y = a(), b()
+ var lval lvalue
+ if v := varinit.Lhs[0]; v.Name() != "_" {
+ lval = &address{addr: p.values[v].(*Global)}
+ } else {
+ lval = blank{}
+ }
+ b.exprInPlace(init, lval, varinit.Rhs, true)
+ } else {
+ // n:1 initialization: var x, y = f()
+ tuple := b.exprN(init, varinit.Rhs)
+ for i, v := range varinit.Lhs {
+ if v.Name() == "_" {
+ continue
+ }
+ emitStore(init, p.values[v].(*Global), emitExtract(init, tuple, i))
+ }
+ }
+ }
+
+ // Build all package-level functions, init functions
+ // and methods, including unreachable/blank ones.
+ // We build them in source order, but it's not significant.
+ for _, file := range p.info.Files {
+ for _, decl := range file.Decls {
+ if decl, ok := decl.(*ast.FuncDecl); ok {
+ b.buildFuncDecl(p, decl)
+ }
+ }
+ }
+
+ // Finish up init().
+ if p.Prog.mode&BareInits == 0 {
+ emitJump(init, done)
+ init.currentBlock = done
+ }
+ init.emit(new(Return))
+ init.finishBody()
+
+ p.info = nil // We no longer need ASTs or go/types deductions.
+
+ if p.Prog.mode&SanityCheckFunctions != 0 {
+ sanityCheckPackage(p)
+ }
+}
+
+// Like ObjectOf, but panics instead of returning nil.
+// Only valid during p's create and build phases.
+func (p *Package) objectOf(id *ast.Ident) types.Object {
+ if o := p.info.ObjectOf(id); o != nil {
+ return o
+ }
+ panic(fmt.Sprintf("no types.Object for ast.Ident %s @ %s",
+ id.Name, p.Prog.Fset.Position(id.Pos())))
+}
+
+// Like TypeOf, but panics instead of returning nil.
+// Only valid during p's create and build phases.
+func (p *Package) typeOf(e ast.Expr) types.Type {
+ if T := p.info.TypeOf(e); T != nil {
+ return T
+ }
+ panic(fmt.Sprintf("no type for %T @ %s",
+ e, p.Prog.Fset.Position(e.Pos())))
+}
+
+// needMethodsOf ensures that runtime type information (including the
+// complete method set) is available for the specified type T and all
+// its subcomponents.
+//
+// needMethodsOf must be called for at least every type that is an
+// operand of some MakeInterface instruction, and for the type of
+// every exported package member.
+//
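+// For example (illustrative only), the conversion in
+//
+// var r io.Reader = new(bytes.Buffer)
+//
+// compiles to a MakeInterface whose operand has type *bytes.Buffer, so
+// needMethodsOf(*bytes.Buffer) must be called to make that type's
+// method set available as runtime type information.
+//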
+// Precondition: T is not a method signature (*Signature with Recv()!=nil).
+//
+// Thread-safe. (Called via emitConv from multiple builder goroutines.)
+//
+// TODO(adonovan): make this faster. It accounts for 20% of SSA build
+// time. Do we need to maintain a distinct needRTTI and methodSets per
+// package? Using just one in the program might be much faster.
+//
+func (p *Package) needMethodsOf(T types.Type) {
+ p.methodsMu.Lock()
+ p.needMethods(T, false)
+ p.methodsMu.Unlock()
+}
+
+// Precondition: T is not a method signature (*Signature with Recv()!=nil).
+// Precondition: the p.methodsMu lock is held.
+// Recursive case: skip => don't call makeMethods(T).
+func (p *Package) needMethods(T types.Type, skip bool) {
+ // Each package maintains its own set of types it has visited.
+ if prevSkip, ok := p.needRTTI.At(T).(bool); ok {
+ // needMethods(T) was previously called
+ if !prevSkip || skip {
+ return // already seen, with same or false 'skip' value
+ }
+ }
+ p.needRTTI.Set(T, skip)
+
+ // Prune the recursion if we find a named or *named type
+ // belonging to another package.
+ var n *types.Named
+ switch T := T.(type) {
+ case *types.Named:
+ n = T
+ case *types.Pointer:
+ n, _ = T.Elem().(*types.Named)
+ }
+ if n != nil {
+ owner := n.Obj().Pkg()
+ if owner == nil {
+ return // built-in error type
+ }
+ if owner != p.Object {
+ return // belongs to another package
+ }
+ }
+
+ // All the actual method sets live in the Program so that
+ // multiple packages can share a single copy in memory of the
+ // symbols that would be compiled into multiple packages (as
+ // weak symbols).
+ if !skip && p.Prog.makeMethods(T) {
+ p.methodSets = append(p.methodSets, T)
+ }
+
+ // Recursion over signatures of each method.
+ tmset := p.Prog.MethodSets.MethodSet(T)
+ for i := 0; i < tmset.Len(); i++ {
+ sig := tmset.At(i).Type().(*types.Signature)
+ p.needMethods(sig.Params(), false)
+ p.needMethods(sig.Results(), false)
+ }
+
+ switch t := T.(type) {
+ case *types.Basic:
+ // nop
+
+ case *types.Interface:
+ // nop---handled by recursion over method set.
+
+ case *types.Pointer:
+ p.needMethods(t.Elem(), false)
+
+ case *types.Slice:
+ p.needMethods(t.Elem(), false)
+
+ case *types.Chan:
+ p.needMethods(t.Elem(), false)
+
+ case *types.Map:
+ p.needMethods(t.Key(), false)
+ p.needMethods(t.Elem(), false)
+
+ case *types.Signature:
+ if t.Recv() != nil {
+ panic(fmt.Sprintf("Signature %s has Recv %s", t, t.Recv()))
+ }
+ p.needMethods(t.Params(), false)
+ p.needMethods(t.Results(), false)
+
+ case *types.Named:
+ // A pointer-to-named type can be derived from a named
+ // type via reflection. It may have methods too.
+ p.needMethods(types.NewPointer(T), false)
+
+ // Consider 'type T struct{S}' where S has methods.
+ // Reflection provides no way to get from T to struct{S},
+ // only to S, so the method set of struct{S} is unwanted,
+ // so set 'skip' flag during recursion.
+ p.needMethods(t.Underlying(), true)
+
+ case *types.Array:
+ p.needMethods(t.Elem(), false)
+
+ case *types.Struct:
+ for i, n := 0, t.NumFields(); i < n; i++ {
+ p.needMethods(t.Field(i).Type(), false)
+ }
+
+ case *types.Tuple:
+ for i, n := 0, t.Len(); i < n; i++ {
+ p.needMethods(t.At(i).Type(), false)
+ }
+
+ default:
+ panic(T)
+ }
+}
diff --git a/llgo/third_party/go.tools/go/ssa/builder_test.go b/llgo/third_party/go.tools/go/ssa/builder_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..2a2b517caa1e31457f7a511de496f58df65dfb46
--- /dev/null
+++ b/llgo/third_party/go.tools/go/ssa/builder_test.go
@@ -0,0 +1,319 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa_test
+
+import (
+ "bytes"
+ "reflect"
+ "sort"
+ "strings"
+ "testing"
+
+ "llvm.org/llgo/third_party/go.tools/go/loader"
+ "llvm.org/llgo/third_party/go.tools/go/ssa"
+ "llvm.org/llgo/third_party/go.tools/go/types"
+)
+
+func isEmpty(f *ssa.Function) bool { return f.Blocks == nil }
+
+// Tests that programs partially loaded from gc object files contain
+// functions with no code for the external portions, but are otherwise ok.
+func TestExternalPackages(t *testing.T) {
+ test := `
+package main
+
+import (
+ "bytes"
+ "io"
+ "testing"
+)
+
+func main() {
+ var t testing.T
+ t.Parallel() // static call to external declared method
+ t.Fail() // static call to promoted external declared method
+ testing.Short() // static call to external package-level function
+
+ var w io.Writer = new(bytes.Buffer)
+ w.Write(nil) // interface invoke of external declared method
+}
+`
+
+ // Create a single-file main package.
+ var conf loader.Config
+ f, err := conf.ParseFile(" ", test)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ conf.CreateFromFiles("main", f)
+
+ iprog, err := conf.Load()
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ prog := ssa.Create(iprog, ssa.SanityCheckFunctions)
+ mainPkg := prog.Package(iprog.Created[0].Pkg)
+ mainPkg.Build()
+
+ // The main package and its direct and indirect dependencies are loaded.
+ deps := []string{
+ // directly imported dependencies:
+ "bytes", "io", "testing",
+ // indirect dependencies (partial list):
+ "errors", "fmt", "os", "runtime",
+ }
+
+ all := prog.AllPackages()
+ if len(all) <= len(deps) {
+ t.Errorf("unexpected set of loaded packages: %q", all)
+ }
+ for _, path := range deps {
+ pkg := prog.ImportedPackage(path)
+ if pkg == nil {
+ t.Errorf("package not loaded: %q", path)
+ continue
+ }
+
+ // External packages should have no function bodies (except for wrappers).
+ isExt := pkg != mainPkg
+
+ // init()
+ if isExt && !isEmpty(pkg.Func("init")) {
+ t.Errorf("external package %s has non-empty init", pkg)
+ } else if !isExt && isEmpty(pkg.Func("init")) {
+ t.Errorf("main package %s has empty init", pkg)
+ }
+
+ for _, mem := range pkg.Members {
+ switch mem := mem.(type) {
+ case *ssa.Function:
+ // Functions at package level.
+ if isExt && !isEmpty(mem) {
+ t.Errorf("external function %s is non-empty", mem)
+ } else if !isExt && isEmpty(mem) {
+ t.Errorf("function %s is empty", mem)
+ }
+
+ case *ssa.Type:
+ // Methods of named types T.
+ // (In this test, all exported methods belong to *T not T.)
+ if !isExt {
+ t.Fatalf("unexpected name type in main package: %s", mem)
+ }
+ mset := prog.MethodSets.MethodSet(types.NewPointer(mem.Type()))
+ for i, n := 0, mset.Len(); i < n; i++ {
+ m := prog.Method(mset.At(i))
+ // For external types, only synthetic wrappers have code.
+ expExt := !strings.Contains(m.Synthetic, "wrapper")
+ if expExt && !isEmpty(m) {
+ t.Errorf("external method %s is non-empty: %s",
+ m, m.Synthetic)
+ } else if !expExt && isEmpty(m) {
+ t.Errorf("method function %s is empty: %s",
+ m, m.Synthetic)
+ }
+ }
+ }
+ }
+ }
+
+ expectedCallee := []string{
+ "(*testing.T).Parallel",
+ "(*testing.common).Fail",
+ "testing.Short",
+ "N/A",
+ }
+ callNum := 0
+ for _, b := range mainPkg.Func("main").Blocks {
+ for _, instr := range b.Instrs {
+ switch instr := instr.(type) {
+ case ssa.CallInstruction:
+ call := instr.Common()
+ if want := expectedCallee[callNum]; want != "N/A" {
+ got := call.StaticCallee().String()
+ if want != got {
+ t.Errorf("call #%d from main.main: got callee %s, want %s",
+ callNum, got, want)
+ }
+ }
+ callNum++
+ }
+ }
+ }
+ if callNum != 4 {
+ t.Errorf("in main.main: got %d calls, want %d", callNum, 4)
+ }
+}
+
+// TestTypesWithMethodSets tests that Package.TypesWithMethodSets includes all necessary types.
+func TestTypesWithMethodSets(t *testing.T) {
+ tests := []struct {
+ input string
+ want []string
+ }{
+ // An exported package-level type is needed.
+ {`package A; type T struct{}; func (T) f() {}`,
+ []string{"*p.T", "p.T"},
+ },
+ // An unexported package-level type is not needed.
+ {`package B; type t struct{}; func (t) f() {}`,
+ nil,
+ },
+ // Subcomponents of type of exported package-level var are needed.
+ {`package C; import "bytes"; var V struct {*bytes.Buffer}`,
+ []string{"*struct{*bytes.Buffer}", "struct{*bytes.Buffer}"},
+ },
+ // Subcomponents of type of unexported package-level var are not needed.
+ {`package D; import "bytes"; var v struct {*bytes.Buffer}`,
+ nil,
+ },
+ // Subcomponents of type of exported package-level function are needed.
+ {`package E; import "bytes"; func F(struct {*bytes.Buffer}) {}`,
+ []string{"struct{*bytes.Buffer}"},
+ },
+ // Subcomponents of type of unexported package-level function are not needed.
+ {`package F; import "bytes"; func f(struct {*bytes.Buffer}) {}`,
+ nil,
+ },
+ // Subcomponents of type of exported method of uninstantiated unexported type are not needed.
+ {`package G; import "bytes"; type x struct{}; func (x) G(struct {*bytes.Buffer}) {}; var v x`,
+ nil,
+ },
+ // ...unless used by MakeInterface.
+ {`package G2; import "bytes"; type x struct{}; func (x) G(struct {*bytes.Buffer}) {}; var v interface{} = x{}`,
+ []string{"*p.x", "p.x", "struct{*bytes.Buffer}"},
+ },
+ // Subcomponents of type of unexported method are not needed.
+ {`package I; import "bytes"; type X struct{}; func (X) G(struct {*bytes.Buffer}) {}`,
+ []string{"*p.X", "p.X", "struct{*bytes.Buffer}"},
+ },
+ // Local types aren't needed.
+ {`package J; import "bytes"; func f() { type T struct {*bytes.Buffer}; var t T; _ = t }`,
+ nil,
+ },
+ // ...unless used by MakeInterface.
+ {`package K; import "bytes"; func f() { type T struct {*bytes.Buffer}; _ = interface{}(T{}) }`,
+ []string{"*p.T", "p.T"},
+ },
+ // Types used as operand of MakeInterface are needed.
+ {`package L; import "bytes"; func f() { _ = interface{}(struct{*bytes.Buffer}{}) }`,
+ []string{"struct{*bytes.Buffer}"},
+ },
+ // MakeInterface is optimized away when storing to a blank.
+ {`package M; import "bytes"; var _ interface{} = struct{*bytes.Buffer}{}`,
+ nil,
+ },
+ }
+ for _, test := range tests {
+ // Create a single-file main package.
+ var conf loader.Config
+ f, err := conf.ParseFile(" ", test.input)
+ if err != nil {
+ t.Errorf("test %q: %s", test.input[:15], err)
+ continue
+ }
+ conf.CreateFromFiles("p", f)
+
+ iprog, err := conf.Load()
+ if err != nil {
+ t.Errorf("test 'package %s': Load: %s", f.Name.Name, err)
+ continue
+ }
+ prog := ssa.Create(iprog, ssa.SanityCheckFunctions)
+ mainPkg := prog.Package(iprog.Created[0].Pkg)
+ prog.BuildAll()
+
+ var typstrs []string
+ for _, T := range mainPkg.TypesWithMethodSets() {
+ typstrs = append(typstrs, T.String())
+ }
+ sort.Strings(typstrs)
+
+ if !reflect.DeepEqual(typstrs, test.want) {
+ t.Errorf("test 'package %s': got %q, want %q", f.Name.Name, typstrs, test.want)
+ }
+ }
+}
+
+// Tests that synthesized init functions are correctly formed.
+// Bare init functions omit calls to dependent init functions and the use of
+// an init guard. They are useful in cases where the client uses a different
+// calling convention for init functions, or cases where it is easier for a
+// client to analyze bare init functions. Both of these aspects are used by
+// the llgo compiler for simpler integration with gccgo's runtime library,
+// and to simplify the analysis whereby it deduces which stores to globals
+// can be lowered to global initializers.
+func TestInit(t *testing.T) {
+ tests := []struct {
+ mode ssa.BuilderMode
+ input, want string
+ }{
+ {0, `package A; import _ "errors"; var i int = 42`,
+ `# Name: A.init
+# Package: A
+# Synthetic: package initializer
+func init():
+0: entry P:0 S:2
+ t0 = *init$guard bool
+ if t0 goto 2 else 1
+1: init.start P:1 S:1
+ *init$guard = true:bool
+ t1 = errors.init() ()
+ *i = 42:int
+ jump 2
+2: init.done P:2 S:0
+ return
+
+`},
+ {ssa.BareInits, `package B; import _ "errors"; var i int = 42`,
+ `# Name: B.init
+# Package: B
+# Synthetic: package initializer
+func init():
+0: entry P:0 S:0
+ *i = 42:int
+ return
+
+`},
+ }
+ for _, test := range tests {
+ // Create a single-file main package.
+ var conf loader.Config
+ f, err := conf.ParseFile(" ", test.input)
+ if err != nil {
+ t.Errorf("test %q: %s", test.input[:15], err)
+ continue
+ }
+ conf.CreateFromFiles(f.Name.Name, f)
+
+ iprog, err := conf.Load()
+ if err != nil {
+ t.Errorf("test 'package %s': Load: %s", f.Name.Name, err)
+ continue
+ }
+ prog := ssa.Create(iprog, test.mode)
+ mainPkg := prog.Package(iprog.Created[0].Pkg)
+ prog.BuildAll()
+ initFunc := mainPkg.Func("init")
+ if initFunc == nil {
+ t.Errorf("test 'package %s': no init function", f.Name.Name)
+ continue
+ }
+
+ var initbuf bytes.Buffer
+ _, err = initFunc.WriteTo(&initbuf)
+ if err != nil {
+ t.Errorf("test 'package %s': WriteTo: %s", f.Name.Name, err)
+ continue
+ }
+
+ if initbuf.String() != test.want {
+ t.Errorf("test 'package %s': got %s, want %s", f.Name.Name, initbuf.String(), test.want)
+ }
+ }
+}
diff --git a/llgo/third_party/go.tools/go/ssa/const.go b/llgo/third_party/go.tools/go/ssa/const.go
new file mode 100644
index 0000000000000000000000000000000000000000..cf81e50e5abd8cf5cd91a506285f7eb01c070f1e
--- /dev/null
+++ b/llgo/third_party/go.tools/go/ssa/const.go
@@ -0,0 +1,168 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// This file defines the Const SSA value type.
+
+import (
+ "fmt"
+ "go/token"
+ "strconv"
+
+ "llvm.org/llgo/third_party/go.tools/go/exact"
+ "llvm.org/llgo/third_party/go.tools/go/types"
+)
+
+// NewConst returns a new constant of the specified value and type.
+// val must be valid according to the specification of Const.Value.
+//
+func NewConst(val exact.Value, typ types.Type) *Const {
+ return &Const{typ, val}
+}
+
+// intConst returns an 'int' constant that evaluates to i.
+// (i is an int64 in case the host is narrower than the target.)
+func intConst(i int64) *Const {
+ return NewConst(exact.MakeInt64(i), tInt)
+}
+
+// nilConst returns a nil constant of the specified type, which may
+// be any reference type, including interfaces.
+//
+func nilConst(typ types.Type) *Const {
+ return NewConst(nil, typ)
+}
+
+// stringConst returns a 'string' constant that evaluates to s.
+func stringConst(s string) *Const {
+ return NewConst(exact.MakeString(s), tString)
+}
+
+// zeroConst returns a new "zero" constant of the specified type,
+// which must not be an array or struct type: the zero values of
+// aggregates are well-defined but cannot be represented by Const.
+//
+func zeroConst(t types.Type) *Const {
+ switch t := t.(type) {
+ case *types.Basic:
+ switch {
+ case t.Info()&types.IsBoolean != 0:
+ return NewConst(exact.MakeBool(false), t)
+ case t.Info()&types.IsNumeric != 0:
+ return NewConst(exact.MakeInt64(0), t)
+ case t.Info()&types.IsString != 0:
+ return NewConst(exact.MakeString(""), t)
+ case t.Kind() == types.UnsafePointer:
+ fallthrough
+ case t.Kind() == types.UntypedNil:
+ return nilConst(t)
+ default:
+ panic(fmt.Sprint("zeroConst for unexpected type:", t))
+ }
+ case *types.Pointer, *types.Slice, *types.Interface, *types.Chan, *types.Map, *types.Signature:
+ return nilConst(t)
+ case *types.Named:
+ return NewConst(zeroConst(t.Underlying()).Value, t)
+ case *types.Array, *types.Struct, *types.Tuple:
+ panic(fmt.Sprint("zeroConst applied to aggregate:", t))
+ }
+ panic(fmt.Sprint("zeroConst: unexpected ", t))
+}
+
+func (c *Const) RelString(from *types.Package) string {
+ var s string
+ if c.Value == nil {
+ s = "nil"
+ } else if c.Value.Kind() == exact.String {
+ s = exact.StringVal(c.Value)
+ const max = 20
+ // TODO(adonovan): don't cut a rune in half.
+ if len(s) > max {
+ s = s[:max-3] + "..." // abbreviate
+ }
+ s = strconv.Quote(s)
+ } else {
+ s = c.Value.String()
+ }
+ return s + ":" + relType(c.Type(), from)
+}
+
+func (c *Const) Name() string {
+ return c.RelString(nil)
+}
+
+func (c *Const) String() string {
+ return c.Name()
+}
+
+func (c *Const) Type() types.Type {
+ return c.typ
+}
+
+func (c *Const) Referrers() *[]Instruction {
+ return nil
+}
+
+func (c *Const) Parent() *Function { return nil }
+
+func (c *Const) Pos() token.Pos {
+ return token.NoPos
+}
+
+// IsNil returns true if this constant represents a typed or untyped nil value.
+func (c *Const) IsNil() bool {
+ return c.Value == nil
+}
+
+// Int64 returns the numeric value of this constant truncated to fit
+// a signed 64-bit integer.
+//
+func (c *Const) Int64() int64 {
+ switch x := c.Value; x.Kind() {
+ case exact.Int:
+ if i, ok := exact.Int64Val(x); ok {
+ return i
+ }
+ return 0
+ case exact.Float:
+ f, _ := exact.Float64Val(x)
+ return int64(f)
+ }
+ panic(fmt.Sprintf("unexpected constant value: %T", c.Value))
+}
+
+// Uint64 returns the numeric value of this constant truncated to fit
+// an unsigned 64-bit integer.
+//
+func (c *Const) Uint64() uint64 {
+ switch x := c.Value; x.Kind() {
+ case exact.Int:
+ if u, ok := exact.Uint64Val(x); ok {
+ return u
+ }
+ return 0
+ case exact.Float:
+ f, _ := exact.Float64Val(x)
+ return uint64(f)
+ }
+ panic(fmt.Sprintf("unexpected constant value: %T", c.Value))
+}
+
+// Float64 returns the numeric value of this constant truncated to fit
+// a float64.
+//
+func (c *Const) Float64() float64 {
+ f, _ := exact.Float64Val(c.Value)
+ return f
+}
+
+// Complex128 returns the complex value of this constant truncated to
+// fit a complex128.
+//
+func (c *Const) Complex128() complex128 {
+ re, _ := exact.Float64Val(exact.Real(c.Value))
+ im, _ := exact.Float64Val(exact.Imag(c.Value))
+ return complex(re, im)
+}
diff --git a/llgo/third_party/go.tools/go/ssa/create.go b/llgo/third_party/go.tools/go/ssa/create.go
new file mode 100644
index 0000000000000000000000000000000000000000..835663760cfd2ac1d6771d656708142a2afa4192
--- /dev/null
+++ b/llgo/third_party/go.tools/go/ssa/create.go
@@ -0,0 +1,278 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// This file implements the CREATE phase of SSA construction.
+// See builder.go for explanation.
+
+import (
+ "go/ast"
+ "go/token"
+ "os"
+ "sync"
+
+ "llvm.org/llgo/third_party/go.tools/go/loader"
+ "llvm.org/llgo/third_party/go.tools/go/types"
+)
+
+// BuilderMode is a bitmask of options for diagnostics and checking.
+type BuilderMode uint
+
+const (
+ PrintPackages BuilderMode = 1 << iota // Print package inventory to stdout
+ PrintFunctions // Print function SSA code to stdout
+ LogSource // Log source locations as SSA builder progresses
+ SanityCheckFunctions // Perform sanity checking of function bodies
+ NaiveForm // Build naïve SSA form: don't replace local loads/stores with registers
+ BuildSerially // Build packages serially, not in parallel.
+ GlobalDebug // Enable debug info for all packages
+ BareInits // Build init functions without guards or calls to dependent inits
+)
+
+// Create returns a new SSA Program. An SSA Package is created for
+// each transitively error-free package of iprog.
+//
+// Code for bodies of functions is not built until Build() is called
+// on the result.
+//
+// mode controls diagnostics and checking during SSA construction.
+//
+func Create(iprog *loader.Program, mode BuilderMode) *Program {
+ prog := &Program{
+ Fset: iprog.Fset,
+ imported: make(map[string]*Package),
+ packages: make(map[*types.Package]*Package),
+ thunks: make(map[selectionKey]*Function),
+ bounds: make(map[*types.Func]*Function),
+ mode: mode,
+ }
+
+ for _, info := range iprog.AllPackages {
+ // TODO(adonovan): relax this constraint if the
+ // program contains only "soft" errors.
+ if info.TransitivelyErrorFree {
+ prog.CreatePackage(info)
+ }
+ }
+
+ return prog
+}
+
+// memberFromObject populates package pkg with a member for the
+// typechecker object obj.
+//
+// For objects from Go source code, syntax is the associated syntax
+// tree (for funcs and vars only); it will be used during the build
+// phase.
+//
+func memberFromObject(pkg *Package, obj types.Object, syntax ast.Node) {
+ name := obj.Name()
+ switch obj := obj.(type) {
+ case *types.TypeName:
+ pkg.Members[name] = &Type{
+ object: obj,
+ pkg: pkg,
+ }
+
+ case *types.Const:
+ c := &NamedConst{
+ object: obj,
+ Value: NewConst(obj.Val(), obj.Type()),
+ pkg: pkg,
+ }
+ pkg.values[obj] = c.Value
+ pkg.Members[name] = c
+
+ case *types.Var:
+ g := &Global{
+ Pkg: pkg,
+ name: name,
+ object: obj,
+ typ: types.NewPointer(obj.Type()), // address
+ pos: obj.Pos(),
+ }
+ pkg.values[obj] = g
+ pkg.Members[name] = g
+
+ case *types.Func:
+ fn := &Function{
+ name: name,
+ object: obj,
+ Signature: obj.Type().(*types.Signature),
+ syntax: syntax,
+ pos: obj.Pos(),
+ Pkg: pkg,
+ Prog: pkg.Prog,
+ }
+ if syntax == nil {
+ fn.Synthetic = "loaded from gc object file"
+ }
+
+ pkg.values[obj] = fn
+ if fn.Signature.Recv() == nil {
+ pkg.Members[name] = fn // package-level function
+ }
+
+ default: // (incl. *types.Package)
+ panic("unexpected Object type: " + obj.String())
+ }
+}
+
+// membersFromDecl populates package pkg with members for each
+// typechecker object (var, func, const or type) associated with the
+// specified decl.
+//
+func membersFromDecl(pkg *Package, decl ast.Decl) {
+ switch decl := decl.(type) {
+ case *ast.GenDecl: // import, const, type or var
+ switch decl.Tok {
+ case token.CONST:
+ for _, spec := range decl.Specs {
+ for _, id := range spec.(*ast.ValueSpec).Names {
+ if !isBlankIdent(id) {
+ memberFromObject(pkg, pkg.info.Defs[id], nil)
+ }
+ }
+ }
+
+ case token.VAR:
+ for _, spec := range decl.Specs {
+ for _, id := range spec.(*ast.ValueSpec).Names {
+ if !isBlankIdent(id) {
+ memberFromObject(pkg, pkg.info.Defs[id], spec)
+ }
+ }
+ }
+
+ case token.TYPE:
+ for _, spec := range decl.Specs {
+ id := spec.(*ast.TypeSpec).Name
+ if !isBlankIdent(id) {
+ memberFromObject(pkg, pkg.info.Defs[id], nil)
+ }
+ }
+ }
+
+ case *ast.FuncDecl:
+ id := decl.Name
+ if decl.Recv == nil && id.Name == "init" {
+ return // no object
+ }
+ if !isBlankIdent(id) {
+ memberFromObject(pkg, pkg.info.Defs[id], decl)
+ }
+ }
+}
+
+// CreatePackage constructs and returns an SSA Package from an
+// error-free package described by info, and populates its Members
+// mapping.
+//
+// Repeated calls with the same info return the same Package.
+//
+// The real work of building SSA form for each function is not done
+// until a subsequent call to Package.Build().
+//
+func (prog *Program) CreatePackage(info *loader.PackageInfo) *Package {
+ if p := prog.packages[info.Pkg]; p != nil {
+ return p // already loaded
+ }
+
+ p := &Package{
+ Prog: prog,
+ Members: make(map[string]Member),
+ values: make(map[types.Object]Value),
+ Object: info.Pkg,
+ info: info, // transient (CREATE and BUILD phases)
+ }
+
+ // Add init() function.
+ p.init = &Function{
+ name: "init",
+ Signature: new(types.Signature),
+ Synthetic: "package initializer",
+ Pkg: p,
+ Prog: prog,
+ }
+ p.Members[p.init.name] = p.init
+
+ // CREATE phase.
+ // Allocate all package members: vars, funcs, consts and types.
+ if len(info.Files) > 0 {
+ // Go source package.
+ for _, file := range info.Files {
+ for _, decl := range file.Decls {
+ membersFromDecl(p, decl)
+ }
+ }
+ } else {
+ // GC-compiled binary package.
+ // No code.
+ // No position information.
+ scope := p.Object.Scope()
+ for _, name := range scope.Names() {
+ obj := scope.Lookup(name)
+ memberFromObject(p, obj, nil)
+ if obj, ok := obj.(*types.TypeName); ok {
+ named := obj.Type().(*types.Named)
+ for i, n := 0, named.NumMethods(); i < n; i++ {
+ memberFromObject(p, named.Method(i), nil)
+ }
+ }
+ }
+ }
+
+ if prog.mode&BareInits == 0 {
+ // Add initializer guard variable.
+ initguard := &Global{
+ Pkg: p,
+ name: "init$guard",
+ typ: types.NewPointer(tBool),
+ }
+ p.Members[initguard.Name()] = initguard
+ }
+
+ if prog.mode&GlobalDebug != 0 {
+ p.SetDebugMode(true)
+ }
+
+ if prog.mode&PrintPackages != 0 {
+ printMu.Lock()
+ p.WriteTo(os.Stdout)
+ printMu.Unlock()
+ }
+
+ if info.Importable {
+ prog.imported[info.Pkg.Path()] = p
+ }
+ prog.packages[p.Object] = p
+
+ return p
+}
+
+// printMu serializes printing of Packages/Functions to stdout
+var printMu sync.Mutex
+
+// AllPackages returns a new slice containing all packages in the
+// program prog in unspecified order.
+//
+func (prog *Program) AllPackages() []*Package {
+ pkgs := make([]*Package, 0, len(prog.packages))
+ for _, pkg := range prog.packages {
+ pkgs = append(pkgs, pkg)
+ }
+ return pkgs
+}
+
+// ImportedPackage returns the importable SSA Package whose import
+// path is path, or nil if no such SSA package has been created.
+//
+// Not all packages are importable. For example, no import
+// declaration can resolve to the x_test package created by 'go test'
+// or the ad-hoc main package created by 'go build foo.go'.
+//
+func (prog *Program) ImportedPackage(path string) *Package {
+ return prog.imported[path]
+}
diff --git a/llgo/third_party/go.tools/go/ssa/doc.go b/llgo/third_party/go.tools/go/ssa/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..0b5c33df5231c922c67ac0bea53d16876a90b2c8
--- /dev/null
+++ b/llgo/third_party/go.tools/go/ssa/doc.go
@@ -0,0 +1,123 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package ssa defines a representation of the elements of Go programs
+// (packages, types, functions, variables and constants) using a
+// static single-assignment (SSA) form intermediate representation
+// (IR) for the bodies of functions.
+//
+// THIS INTERFACE IS EXPERIMENTAL AND IS LIKELY TO CHANGE.
+//
+// For an introduction to SSA form, see
+// http://en.wikipedia.org/wiki/Static_single_assignment_form.
+// This page provides a broader reading list:
+// http://www.dcs.gla.ac.uk/~jsinger/ssa.html.
+//
+// The level of abstraction of the SSA form is intentionally close to
+// the source language to facilitate construction of source analysis
+// tools. It is not intended for machine code generation.
+//
+// All looping, branching and switching constructs are replaced with
+// unstructured control flow. Higher-level control flow constructs
+// such as multi-way branch can be reconstructed as needed; see
+// ssautil.Switches() for an example.
+//
+// To construct an SSA-form program, call ssa.Create on a
+// loader.Program, a set of type-checked packages created from
+// parsed Go source files. The resulting ssa.Program contains all the
+// packages and their members, but SSA code is not created for
+// function bodies until a subsequent call to (*Package).Build.
+//
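+// A minimal sketch of typical use, assuming a loader.Config named conf
+// has already been populated with the packages of interest:
+//
+// iprog, err := conf.Load()
+// if err != nil {
+// // handle error
+// }
+// prog := ssa.Create(iprog, ssa.SanityCheckFunctions)
+// prog.BuildAll() // build SSA code for all function bodies
+//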
+// The builder initially builds a naive SSA form in which all local
+// variables are addresses of stack locations with explicit loads and
+// stores. Registerisation of eligible locals and φ-node insertion
+// using dominance and dataflow are then performed as a second pass
+// called "lifting" to improve the accuracy and performance of
+// subsequent analyses; this pass can be skipped by setting the
+// NaiveForm builder flag.
+//
+// The primary interfaces of this package are:
+//
+// - Member: a named member of a Go package.
+// - Value: an expression that yields a value.
+// - Instruction: a statement that consumes values and performs computation.
+// - Node: a Value or Instruction (emphasizing its membership in the SSA value graph)
+//
+// A computation that yields a result implements both the Value and
+// Instruction interfaces. The following table shows for each
+// concrete type which of these interfaces it implements.
+//
+// Value? Instruction? Member?
+// *Alloc ✔ ✔
+// *BinOp ✔ ✔
+// *Builtin ✔
+// *Call ✔ ✔
+// *ChangeInterface ✔ ✔
+// *ChangeType ✔ ✔
+// *Const ✔
+// *Convert ✔ ✔
+// *DebugRef ✔
+// *Defer ✔
+// *Extract ✔ ✔
+// *Field ✔ ✔
+// *FieldAddr ✔ ✔
+// *FreeVar ✔
+// *Function ✔ ✔ (func)
+// *Global ✔ ✔ (var)
+// *Go ✔
+// *If ✔
+// *Index ✔ ✔
+// *IndexAddr ✔ ✔
+// *Jump ✔
+// *Lookup ✔ ✔
+// *MakeChan ✔ ✔
+// *MakeClosure ✔ ✔
+// *MakeInterface ✔ ✔
+// *MakeMap ✔ ✔
+// *MakeSlice ✔ ✔
+// *MapUpdate ✔
+// *NamedConst ✔ (const)
+// *Next ✔ ✔
+// *Panic ✔
+// *Parameter ✔
+// *Phi ✔ ✔
+// *Range ✔ ✔
+// *Return ✔
+// *RunDefers ✔
+// *Select ✔ ✔
+// *Send ✔
+// *Slice ✔ ✔
+// *Store ✔
+// *Type ✔ (type)
+// *TypeAssert ✔ ✔
+// *UnOp ✔ ✔
+//
+// Other key types in this package include: Program, Package, Function
+// and BasicBlock.
+//
+// The program representation constructed by this package is fully
+// resolved internally, i.e. it does not rely on the names of Values,
+// Packages, Functions, Types or BasicBlocks for the correct
+// interpretation of the program. Only the identities of objects and
+// the topology of the SSA and type graphs are semantically
+// significant. (There is one exception: Ids, used to identify field
+// and method names, contain strings.) Avoidance of name-based
+// operations simplifies the implementation of subsequent passes and
+// can make them very efficient. Many objects are nonetheless named
+// to aid in debugging, but it is not essential that the names be
+// either accurate or unambiguous. The public API exposes a number of
+// name-based maps for client convenience.
+//
+// The ssa/ssautil package provides various utilities that depend only
+// on the public API of this package.
+//
+// TODO(adonovan): Consider the exceptional control-flow implications
+// of defer and recover().
+//
+// TODO(adonovan): write a how-to document for all the various cases
+// of trying to determine corresponding elements across the four
+// domains of source locations, ast.Nodes, types.Objects,
+// ssa.Values/Instructions.
+//
+package ssa
diff --git a/llgo/third_party/go.tools/go/ssa/dom.go b/llgo/third_party/go.tools/go/ssa/dom.go
new file mode 100644
index 0000000000000000000000000000000000000000..12ef4308f3c07668ef438273f6687812b6da3a65
--- /dev/null
+++ b/llgo/third_party/go.tools/go/ssa/dom.go
@@ -0,0 +1,341 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// This file defines algorithms related to dominance.
+
+// Dominator tree construction ----------------------------------------
+//
+// We use the algorithm described in Lengauer & Tarjan. 1979. A fast
+// algorithm for finding dominators in a flowgraph.
+// http://doi.acm.org/10.1145/357062.357071
+//
+// We also apply the optimizations to SLT described in Georgiadis et
+// al, Finding Dominators in Practice, JGAA 2006,
+// http://jgaa.info/accepted/2006/GeorgiadisTarjanWerneck2006.10.1.pdf
+// to avoid the need for buckets of size > 1.
+
+import (
+ "bytes"
+ "fmt"
+ "math/big"
+ "os"
+ "sort"
+)
+
+// Idom returns the block that immediately dominates b:
+// its parent in the dominator tree, if any.
+// Neither the entry node (b.Index==0) nor the recover node
+// (b==b.Parent().Recover()) has a parent.
+//
+func (b *BasicBlock) Idom() *BasicBlock { return b.dom.idom }
+
+// Dominees returns the list of blocks that b immediately dominates:
+// its children in the dominator tree.
+//
+func (b *BasicBlock) Dominees() []*BasicBlock { return b.dom.children }
+
+// Dominates reports whether b dominates c.
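+// For example, in a diamond CFG entry -> {then, else} -> join, entry
+// dominates every block, but neither then nor else dominates join.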
+func (b *BasicBlock) Dominates(c *BasicBlock) bool {
+ return b.dom.pre <= c.dom.pre && c.dom.post <= b.dom.post
+}
+
+type byDomPreorder []*BasicBlock
+
+func (a byDomPreorder) Len() int { return len(a) }
+func (a byDomPreorder) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a byDomPreorder) Less(i, j int) bool { return a[i].dom.pre < a[j].dom.pre }
+
+// DomPreorder returns a new slice containing the blocks of f in
+// dominator tree preorder.
+//
+func (f *Function) DomPreorder() []*BasicBlock {
+ n := len(f.Blocks)
+ order := make(byDomPreorder, n, n)
+ copy(order, f.Blocks)
+ sort.Sort(order)
+ return order
+}
+
+// domInfo contains a BasicBlock's dominance information.
+type domInfo struct {
+ idom *BasicBlock // immediate dominator (parent in domtree)
+ children []*BasicBlock // nodes immediately dominated by this one
+ pre, post int32 // pre- and post-order numbering within domtree
+}
+
+// ltState holds the working state for the Lengauer-Tarjan algorithm
+// (during which domInfo.pre is repurposed as the CFG DFS preorder number).
+type ltState struct {
+ // Each slice is indexed by b.Index.
+ sdom []*BasicBlock // b's semidominator
+ parent []*BasicBlock // b's parent in DFS traversal of CFG
+ ancestor []*BasicBlock // b's ancestor with least sdom
+}
+
+// dfs implements the depth-first search part of the LT algorithm.
+func (lt *ltState) dfs(v *BasicBlock, i int32, preorder []*BasicBlock) int32 {
+ preorder[i] = v
+ v.dom.pre = i // For now: DFS preorder of spanning tree of CFG
+ i++
+ lt.sdom[v.Index] = v
+ lt.link(nil, v)
+ for _, w := range v.Succs {
+ if lt.sdom[w.Index] == nil {
+ lt.parent[w.Index] = v
+ i = lt.dfs(w, i, preorder)
+ }
+ }
+ return i
+}
+
+// eval implements the EVAL part of the LT algorithm.
+func (lt *ltState) eval(v *BasicBlock) *BasicBlock {
+ // TODO(adonovan): opt: do path compression per simple LT.
+ u := v
+ for ; lt.ancestor[v.Index] != nil; v = lt.ancestor[v.Index] {
+ if lt.sdom[v.Index].dom.pre < lt.sdom[u.Index].dom.pre {
+ u = v
+ }
+ }
+ return u
+}
+
+// link implements the LINK part of the LT algorithm.
+func (lt *ltState) link(v, w *BasicBlock) {
+ lt.ancestor[w.Index] = v
+}
+
+// buildDomTree computes the dominator tree of f using the LT algorithm.
+// Precondition: all blocks are reachable (e.g. optimizeBlocks has been run).
+//
+func buildDomTree(f *Function) {
+ // The step numbers refer to the original LT paper; the
+ // reordering is due to Georgiadis.
+
+ // Clear any previous domInfo.
+ for _, b := range f.Blocks {
+ b.dom = domInfo{}
+ }
+
+ n := len(f.Blocks)
+ // Allocate space for 5 contiguous [n]*BasicBlock arrays:
+ // sdom, parent, ancestor, preorder, buckets.
+ space := make([]*BasicBlock, 5*n, 5*n)
+ lt := ltState{
+ sdom: space[0:n],
+ parent: space[n : 2*n],
+ ancestor: space[2*n : 3*n],
+ }
+
+ // Step 1. Number vertices by depth-first preorder.
+ preorder := space[3*n : 4*n]
+ root := f.Blocks[0]
+ prenum := lt.dfs(root, 0, preorder)
+ recover := f.Recover
+ if recover != nil {
+ lt.dfs(recover, prenum, preorder)
+ }
+
+ buckets := space[4*n : 5*n]
+ copy(buckets, preorder)
+
+ // In reverse preorder...
+ for i := int32(n) - 1; i > 0; i-- {
+ w := preorder[i]
+
+ // Step 3. Implicitly define the immediate dominator of each node.
+ for v := buckets[i]; v != w; v = buckets[v.dom.pre] {
+ u := lt.eval(v)
+ if lt.sdom[u.Index].dom.pre < i {
+ v.dom.idom = u
+ } else {
+ v.dom.idom = w
+ }
+ }
+
+ // Step 2. Compute the semidominators of all nodes.
+ lt.sdom[w.Index] = lt.parent[w.Index]
+ for _, v := range w.Preds {
+ u := lt.eval(v)
+ if lt.sdom[u.Index].dom.pre < lt.sdom[w.Index].dom.pre {
+ lt.sdom[w.Index] = lt.sdom[u.Index]
+ }
+ }
+
+ lt.link(lt.parent[w.Index], w)
+
+ if lt.parent[w.Index] == lt.sdom[w.Index] {
+ w.dom.idom = lt.parent[w.Index]
+ } else {
+ buckets[i] = buckets[lt.sdom[w.Index].dom.pre]
+ buckets[lt.sdom[w.Index].dom.pre] = w
+ }
+ }
+
+ // The final 'Step 3' is now outside the loop.
+ for v := buckets[0]; v != root; v = buckets[v.dom.pre] {
+ v.dom.idom = root
+ }
+
+ // Step 4. Explicitly define the immediate dominator of each
+ // node, in preorder.
+ for _, w := range preorder[1:] {
+ if w == root || w == recover {
+ w.dom.idom = nil
+ } else {
+ if w.dom.idom != lt.sdom[w.Index] {
+ w.dom.idom = w.dom.idom.dom.idom
+ }
+ // Calculate Children relation as inverse of Idom.
+ w.dom.idom.dom.children = append(w.dom.idom.dom.children, w)
+ }
+ }
+
+ pre, post := numberDomTree(root, 0, 0)
+ if recover != nil {
+ numberDomTree(recover, pre, post)
+ }
+
+ // printDomTreeDot(os.Stderr, f) // debugging
+ // printDomTreeText(os.Stderr, root, 0) // debugging
+
+ if f.Prog.mode&SanityCheckFunctions != 0 {
+ sanityCheckDomTree(f)
+ }
+}
+
+// numberDomTree sets the pre- and post-order numbers of a depth-first
+// traversal of the dominator tree rooted at v. These are used to
+// answer dominance queries in constant time.
+//
+func numberDomTree(v *BasicBlock, pre, post int32) (int32, int32) {
+ v.dom.pre = pre
+ pre++
+ for _, child := range v.dom.children {
+ pre, post = numberDomTree(child, pre, post)
+ }
+ v.dom.post = post
+ post++
+ return pre, post
+}
+
+// Testing utilities ----------------------------------------
+
+// sanityCheckDomTree checks the correctness of the dominator tree
+// computed by the LT algorithm by comparing against the dominance
+// relation computed by a naive Kildall-style forward dataflow
+// analysis (Algorithm 10.16 from the "Dragon" book).
+//
+func sanityCheckDomTree(f *Function) {
+ n := len(f.Blocks)
+
+ // D[i] is the set of blocks that dominate f.Blocks[i],
+ // represented as a bit-set of block indices.
+ D := make([]big.Int, n)
+
+ one := big.NewInt(1)
+
+ // all is the set of all blocks; constant.
+ var all big.Int
+ all.Set(one).Lsh(&all, uint(n)).Sub(&all, one)
+
+ // Initialization.
+ for i, b := range f.Blocks {
+ if i == 0 || b == f.Recover {
+ // A root is dominated only by itself.
+ D[i].SetBit(&D[0], 0, 1)
+ } else {
+ // All other blocks are (initially) dominated
+ // by every block.
+ D[i].Set(&all)
+ }
+ }
+
+ // Iteration until fixed point.
+ for changed := true; changed; {
+ changed = false
+ for i, b := range f.Blocks {
+ if i == 0 || b == f.Recover {
+ continue
+ }
+ // Compute intersection across predecessors.
+ var x big.Int
+ x.Set(&all)
+ for _, pred := range b.Preds {
+ x.And(&x, &D[pred.Index])
+ }
+ x.SetBit(&x, i, 1) // a block always dominates itself.
+ if D[i].Cmp(&x) != 0 {
+ D[i].Set(&x)
+ changed = true
+ }
+ }
+ }
+
+ // Check the entire relation. O(n^2).
+ // The Recover block (if any) must be treated specially so we skip it.
+ ok := true
+ for i := 0; i < n; i++ {
+ for j := 0; j < n; j++ {
+ b, c := f.Blocks[i], f.Blocks[j]
+ if c == f.Recover {
+ continue
+ }
+ actual := b.Dominates(c)
+ expected := D[j].Bit(i) == 1
+ if actual != expected {
+ fmt.Fprintf(os.Stderr, "dominates(%s, %s)==%t, want %t\n", b, c, actual, expected)
+ ok = false
+ }
+ }
+ }
+
+ preorder := f.DomPreorder()
+ for _, b := range f.Blocks {
+ if got := preorder[b.dom.pre]; got != b {
+ fmt.Fprintf(os.Stderr, "preorder[%d]==%s, want %s\n", b.dom.pre, got, b)
+ ok = false
+ }
+ }
+
+ if !ok {
+ panic("sanityCheckDomTree failed for " + f.String())
+ }
+
+}
+
+// Printing functions ----------------------------------------
+
+// printDomTree prints the dominator tree as text, using indentation.
+func printDomTreeText(buf *bytes.Buffer, v *BasicBlock, indent int) {
+ fmt.Fprintf(buf, "%*s%s\n", 4*indent, "", v)
+ for _, child := range v.dom.children {
+ printDomTreeText(buf, child, indent+1)
+ }
+}
+
+// printDomTreeDot prints the dominator tree of f in AT&T GraphViz
+// (.dot) format.
+func printDomTreeDot(buf *bytes.Buffer, f *Function) {
+ fmt.Fprintln(buf, "//", f)
+ fmt.Fprintln(buf, "digraph domtree {")
+ for i, b := range f.Blocks {
+ v := b.dom
+ fmt.Fprintf(buf, "\tn%d [label=\"%s (%d, %d)\",shape=\"rectangle\"];\n", v.pre, b, v.pre, v.post)
+ // TODO(adonovan): improve appearance of edges
+ // belonging to both dominator tree and CFG.
+
+ // Dominator tree edge.
+ if i != 0 {
+ fmt.Fprintf(buf, "\tn%d -> n%d [style=\"solid\",weight=100];\n", v.idom.dom.pre, v.pre)
+ }
+ // CFG edges.
+ for _, pred := range b.Preds {
+ fmt.Fprintf(buf, "\tn%d -> n%d [style=\"dotted\",weight=0];\n", pred.dom.pre, v.pre)
+ }
+ }
+ fmt.Fprintln(buf, "}")
+}
diff --git a/llgo/third_party/go.tools/go/ssa/emit.go b/llgo/third_party/go.tools/go/ssa/emit.go
new file mode 100644
index 0000000000000000000000000000000000000000..4cc32ead082ff05ef494d6d9ab3c4962f4187601
--- /dev/null
+++ b/llgo/third_party/go.tools/go/ssa/emit.go
@@ -0,0 +1,474 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// Helpers for emitting SSA instructions.
+
+import (
+ "fmt"
+ "go/ast"
+ "go/token"
+
+ "llvm.org/llgo/third_party/go.tools/go/types"
+)
+
+// emitNew emits to f a new (heap Alloc) instruction allocating an
+// object of type typ. pos is the optional source location.
+//
+func emitNew(f *Function, typ types.Type, pos token.Pos) *Alloc {
+ v := &Alloc{Heap: true}
+ v.setType(types.NewPointer(typ))
+ v.setPos(pos)
+ f.emit(v)
+ return v
+}
+
+// emitLoad emits to f an instruction to load the address addr into a
+// new temporary, and returns the value so defined.
+//
+func emitLoad(f *Function, addr Value) *UnOp {
+ v := &UnOp{Op: token.MUL, X: addr}
+ v.setType(deref(addr.Type()))
+ f.emit(v)
+ return v
+}
+
+// emitDebugRef emits to f a DebugRef pseudo-instruction associating
+// expression e with value v.
+//
+func emitDebugRef(f *Function, e ast.Expr, v Value, isAddr bool) {
+ if !f.debugInfo() {
+ return // debugging not enabled
+ }
+ if v == nil || e == nil {
+ panic("nil")
+ }
+ var obj types.Object
+ e = unparen(e)
+ if id, ok := e.(*ast.Ident); ok {
+ if isBlankIdent(id) {
+ return
+ }
+ obj = f.Pkg.objectOf(id)
+ switch obj.(type) {
+ case *types.Nil, *types.Const, *types.Builtin:
+ return
+ }
+ }
+ f.emit(&DebugRef{
+ X: v,
+ Expr: e,
+ IsAddr: isAddr,
+ object: obj,
+ })
+}
+
+// emitArith emits to f code to compute the binary operation op(x, y)
+// where op is an eager shift, logical or arithmetic operation.
+// (Use emitCompare() for comparisons and Builder.logicalBinop() for
+// non-eager operations.)
+//
+func emitArith(f *Function, op token.Token, x, y Value, t types.Type, pos token.Pos) Value {
+ switch op {
+ case token.SHL, token.SHR:
+ x = emitConv(f, x, t)
+ // y may be signed or an 'untyped' constant.
+ // TODO(adonovan): whence signed values?
+ if b, ok := y.Type().Underlying().(*types.Basic); ok && b.Info()&types.IsUnsigned == 0 {
+ y = emitConv(f, y, types.Typ[types.Uint64])
+ }
+
+ case token.ADD, token.SUB, token.MUL, token.QUO, token.REM, token.AND, token.OR, token.XOR, token.AND_NOT:
+ x = emitConv(f, x, t)
+ y = emitConv(f, y, t)
+
+ default:
+ panic("illegal op in emitArith: " + op.String())
+
+ }
+ v := &BinOp{
+ Op: op,
+ X: x,
+ Y: y,
+ }
+ v.setPos(pos)
+ v.setType(t)
+ return f.emit(v)
+}
+
+// emitCompare emits to f code to compute the boolean result of the
+// comparison 'x op y'.
+//
+func emitCompare(f *Function, op token.Token, x, y Value, pos token.Pos) Value {
+ xt := x.Type().Underlying()
+ yt := y.Type().Underlying()
+
+ // Special case to optimise a tagless SwitchStmt so that
+ // these are equivalent
+ // switch { case e: ...}
+ // switch true { case e: ... }
+ // if e==true { ... }
+ // even in the case when e's type is an interface.
+ // TODO(adonovan): opt: generalise to x==true, false!=y, etc.
+ if x == vTrue && op == token.EQL {
+ if yt, ok := yt.(*types.Basic); ok && yt.Info()&types.IsBoolean != 0 {
+ return y
+ }
+ }
+
+ if types.Identical(xt, yt) {
+ // no conversion necessary
+ } else if _, ok := xt.(*types.Interface); ok {
+ y = emitConv(f, y, x.Type())
+ } else if _, ok := yt.(*types.Interface); ok {
+ x = emitConv(f, x, y.Type())
+ } else if _, ok := x.(*Const); ok {
+ x = emitConv(f, x, y.Type())
+ } else if _, ok := y.(*Const); ok {
+ y = emitConv(f, y, x.Type())
+ } else {
+ // other cases, e.g. channels. No-op.
+ }
+
+ v := &BinOp{
+ Op: op,
+ X: x,
+ Y: y,
+ }
+ v.setPos(pos)
+ v.setType(tBool)
+ return f.emit(v)
+}
+
+// isValuePreserving returns true if a conversion from ut_src to
+// ut_dst is value-preserving, i.e. just a change of type.
+// Precondition: neither argument is a named type.
+//
+func isValuePreserving(ut_src, ut_dst types.Type) bool {
+ // Identical underlying types?
+ if types.Identical(ut_dst, ut_src) {
+ return true
+ }
+
+ switch ut_dst.(type) {
+ case *types.Chan:
+ // Conversion between channel types?
+ _, ok := ut_src.(*types.Chan)
+ return ok
+
+ case *types.Pointer:
+ // Conversion between pointers with identical base types?
+ _, ok := ut_src.(*types.Pointer)
+ return ok
+ }
+ return false
+}
+
+// emitConv emits to f code to convert Value val to exactly type typ,
+// and returns the converted value. Implicit conversions are required
+// by language assignability rules in assignments, parameter passing,
+// etc. Conversions cannot fail dynamically.
+//
+func emitConv(f *Function, val Value, typ types.Type) Value {
+ t_src := val.Type()
+
+ // Identical types? Conversion is a no-op.
+ if types.Identical(t_src, typ) {
+ return val
+ }
+
+ ut_dst := typ.Underlying()
+ ut_src := t_src.Underlying()
+
+ // Just a change of type, but not value or representation?
+ if isValuePreserving(ut_src, ut_dst) {
+ c := &ChangeType{X: val}
+ c.setType(typ)
+ return f.emit(c)
+ }
+
+ // Conversion to, or construction of a value of, an interface type?
+ if _, ok := ut_dst.(*types.Interface); ok {
+ // Assignment from one interface type to another?
+ if _, ok := ut_src.(*types.Interface); ok {
+ c := &ChangeInterface{X: val}
+ c.setType(typ)
+ return f.emit(c)
+ }
+
+ // Untyped nil constant? Return interface-typed nil constant.
+ if ut_src == tUntypedNil {
+ return nilConst(typ)
+ }
+
+ // Convert (non-nil) "untyped" literals to their default type.
+ if t, ok := ut_src.(*types.Basic); ok && t.Info()&types.IsUntyped != 0 {
+ val = emitConv(f, val, DefaultType(ut_src))
+ }
+
+ f.Pkg.needMethodsOf(val.Type())
+ mi := &MakeInterface{X: val}
+ mi.setType(typ)
+ return f.emit(mi)
+ }
+
+ // Conversion of a compile-time constant value?
+ if c, ok := val.(*Const); ok {
+ if _, ok := ut_dst.(*types.Basic); ok || c.IsNil() {
+ // Conversion of a compile-time constant to
+ // another constant type results in a new
+ // constant of the destination type and
+ // (initially) the same abstract value.
+ // We don't truncate the value yet.
+ return NewConst(c.Value, typ)
+ }
+
+ // We're converting from constant to non-constant type,
+ // e.g. string -> []byte/[]rune.
+ }
+
+ // A representation-changing conversion?
+ // At least one of {ut_src,ut_dst} must be *Basic.
+ // (The other may be []byte or []rune.)
+ _, ok1 := ut_src.(*types.Basic)
+ _, ok2 := ut_dst.(*types.Basic)
+ if ok1 || ok2 {
+ c := &Convert{X: val}
+ c.setType(typ)
+ return f.emit(c)
+ }
+
+ panic(fmt.Sprintf("in %s: cannot convert %s (%s) to %s", f, val, val.Type(), typ))
+}
+
+// emitStore emits to f an instruction to store value val at location
+// addr, applying implicit conversions as required by assignability rules.
+//
+func emitStore(f *Function, addr, val Value) *Store {
+ s := &Store{
+ Addr: addr,
+ Val: emitConv(f, val, deref(addr.Type())),
+ }
+ f.emit(s)
+ return s
+}
+
+// emitJump emits to f a jump to target, and updates the control-flow graph.
+// Postcondition: f.currentBlock is nil.
+//
+func emitJump(f *Function, target *BasicBlock) {
+ b := f.currentBlock
+ b.emit(new(Jump))
+ addEdge(b, target)
+ f.currentBlock = nil
+}
+
+// emitIf emits to f a conditional jump to tblock or fblock based on
+// cond, and updates the control-flow graph.
+// Postcondition: f.currentBlock is nil.
+//
+func emitIf(f *Function, cond Value, tblock, fblock *BasicBlock) {
+ b := f.currentBlock
+ b.emit(&If{Cond: cond})
+ addEdge(b, tblock)
+ addEdge(b, fblock)
+ f.currentBlock = nil
+}
+
+// emitExtract emits to f an instruction to extract the index'th
+// component of tuple. It returns the extracted value.
+//
+func emitExtract(f *Function, tuple Value, index int) Value {
+ e := &Extract{Tuple: tuple, Index: index}
+ e.setType(tuple.Type().(*types.Tuple).At(index).Type())
+ return f.emit(e)
+}
+
+// emitTypeAssert emits to f a type assertion value := x.(t) and
+// returns the value. x.Type() must be an interface.
+//
+func emitTypeAssert(f *Function, x Value, t types.Type, pos token.Pos) Value {
+ a := &TypeAssert{X: x, AssertedType: t}
+ a.setPos(pos)
+ a.setType(t)
+ return f.emit(a)
+}
+
+// emitTypeTest emits to f a type test value,ok := x.(t) and returns
+// a (value, ok) tuple. x.Type() must be an interface.
+//
+func emitTypeTest(f *Function, x Value, t types.Type, pos token.Pos) Value {
+ a := &TypeAssert{
+ X: x,
+ AssertedType: t,
+ CommaOk: true,
+ }
+ a.setPos(pos)
+ a.setType(types.NewTuple(
+ newVar("value", t),
+ varOk,
+ ))
+ return f.emit(a)
+}
+
+// emitTailCall emits to f a function call in tail position. The
+// caller is responsible for all fields of 'call' except its type.
+// Intended for wrapper methods.
+// Precondition: f does/will not use deferred procedure calls.
+// Postcondition: f.currentBlock is nil.
+//
+func emitTailCall(f *Function, call *Call) {
+ tresults := f.Signature.Results()
+ nr := tresults.Len()
+ if nr == 1 {
+ call.typ = tresults.At(0).Type()
+ } else {
+ call.typ = tresults
+ }
+ tuple := f.emit(call)
+ var ret Return
+ switch nr {
+ case 0:
+ // no-op
+ case 1:
+ ret.Results = []Value{tuple}
+ default:
+ for i := 0; i < nr; i++ {
+ v := emitExtract(f, tuple, i)
+ // TODO(adonovan): in principle, this is required:
+ // v = emitConv(f, o.Type, f.Signature.Results[i].Type)
+ // but in practice emitTailCall is only used when
+ // the types exactly match.
+ ret.Results = append(ret.Results, v)
+ }
+ }
+ f.emit(&ret)
+ f.currentBlock = nil
+}
+
+// emitImplicitSelections emits to f code to apply the sequence of
+// implicit field selections specified by indices to base value v, and
+// returns the selected value.
+//
+// If v is the address of a struct, the result will be the address of
+// a field; if it is the value of a struct, the result will be the
+// value of a field.
+//
+func emitImplicitSelections(f *Function, v Value, indices []int) Value {
+ for _, index := range indices {
+ fld := deref(v.Type()).Underlying().(*types.Struct).Field(index)
+
+ if isPointer(v.Type()) {
+ instr := &FieldAddr{
+ X: v,
+ Field: index,
+ }
+ instr.setType(types.NewPointer(fld.Type()))
+ v = f.emit(instr)
+ // Load the field's value iff indirectly embedded.
+ if isPointer(fld.Type()) {
+ v = emitLoad(f, v)
+ }
+ } else {
+ instr := &Field{
+ X: v,
+ Field: index,
+ }
+ instr.setType(fld.Type())
+ v = f.emit(instr)
+ }
+ }
+ return v
+}
+
+// emitFieldSelection emits to f code to select the index'th field of v.
+//
+// If wantAddr, the input must be a pointer-to-struct and the result
+// will be the field's address; otherwise the result will be the
+// field's value.
+// Ident id is used for position and debug info.
+//
+func emitFieldSelection(f *Function, v Value, index int, wantAddr bool, id *ast.Ident) Value {
+ fld := deref(v.Type()).Underlying().(*types.Struct).Field(index)
+ if isPointer(v.Type()) {
+ instr := &FieldAddr{
+ X: v,
+ Field: index,
+ }
+ instr.setPos(id.Pos())
+ instr.setType(types.NewPointer(fld.Type()))
+ v = f.emit(instr)
+ // Load the field's value iff we don't want its address.
+ if !wantAddr {
+ v = emitLoad(f, v)
+ }
+ } else {
+ instr := &Field{
+ X: v,
+ Field: index,
+ }
+ instr.setPos(id.Pos())
+ instr.setType(fld.Type())
+ v = f.emit(instr)
+ }
+ emitDebugRef(f, id, v, wantAddr)
+ return v
+}
+
+// zeroValue emits to f code to produce a zero value of type t,
+// and returns it.
+//
+func zeroValue(f *Function, t types.Type) Value {
+ switch t.Underlying().(type) {
+ case *types.Struct, *types.Array:
+ return emitLoad(f, f.addLocal(t, token.NoPos))
+ default:
+ return zeroConst(t)
+ }
+}
+
+// emitMemClear emits to f code to zero the value pointed to by ptr.
+func emitMemClear(f *Function, ptr Value) {
+ // TODO(adonovan): define and use a 'memclr' intrinsic for aggregate types.
+ emitStore(f, ptr, zeroValue(f, deref(ptr.Type())))
+}
+
+// createRecoverBlock emits to f a block of code to return after a
+// recovered panic, and sets f.Recover to it.
+//
+// If f's result parameters are named, the code loads and returns
+// their current values, otherwise it returns the zero values of their
+// type.
+//
+// Idempotent.
+//
+func createRecoverBlock(f *Function) {
+ if f.Recover != nil {
+ return // already created
+ }
+ saved := f.currentBlock
+
+ f.Recover = f.newBasicBlock("recover")
+ f.currentBlock = f.Recover
+
+ var results []Value
+ if f.namedResults != nil {
+ // Reload NRPs to form value tuple.
+ for _, r := range f.namedResults {
+ results = append(results, emitLoad(f, r))
+ }
+ } else {
+ R := f.Signature.Results()
+ for i, n := 0, R.Len(); i < n; i++ {
+ T := R.At(i).Type()
+
+ // Return zero value of each result type.
+ results = append(results, zeroValue(f, T))
+ }
+ }
+ f.emit(&Return{Results: results})
+
+ f.currentBlock = saved
+}
diff --git a/llgo/third_party/go.tools/go/ssa/example_test.go b/llgo/third_party/go.tools/go/ssa/example_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..85eb3afc634643777e3d9ed8fa7200ad0898e82f
--- /dev/null
+++ b/llgo/third_party/go.tools/go/ssa/example_test.go
@@ -0,0 +1,110 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa_test
+
+import (
+ "fmt"
+ "os"
+
+ "llvm.org/llgo/third_party/go.tools/go/loader"
+ "llvm.org/llgo/third_party/go.tools/go/ssa"
+)
+
+// This program demonstrates how to run the SSA builder on a "Hello,
+// World!" program and shows the printed representation of packages,
+// functions and instructions.
+//
+// Within the function listing, the name of each BasicBlock such as
+// ".0.entry" is printed left-aligned, followed by the block's
+// Instructions.
+//
+// For each instruction that defines an SSA virtual register
+// (i.e. implements Value), the type of that value is shown in the
+// right column.
+//
+// Build and run the ssadump.go program if you want a standalone tool
+// with similar functionality. It is located at
+// golang.org/x/tools/cmd/ssadump.
+//
+func Example() {
+ const hello = `
+package main
+
+import "fmt"
+
+const message = "Hello, World!"
+
+func main() {
+ fmt.Println(message)
+}
+`
+ var conf loader.Config
+
+ // Parse the input file.
+ file, err := conf.ParseFile("hello.go", hello)
+ if err != nil {
+ fmt.Print(err) // parse error
+ return
+ }
+
+ // Create single-file main package.
+ conf.CreateFromFiles("main", file)
+
+ // Load the main package and its dependencies.
+ iprog, err := conf.Load()
+ if err != nil {
+ fmt.Print(err) // type error in some package
+ return
+ }
+
+ // Create SSA-form program representation.
+ prog := ssa.Create(iprog, ssa.SanityCheckFunctions)
+ mainPkg := prog.Package(iprog.Created[0].Pkg)
+
+ // Print out the package.
+ mainPkg.WriteTo(os.Stdout)
+
+ // Build SSA code for bodies of functions in mainPkg.
+ mainPkg.Build()
+
+ // Print out the package-level functions.
+ mainPkg.Func("init").WriteTo(os.Stdout)
+ mainPkg.Func("main").WriteTo(os.Stdout)
+
+ // Output:
+ //
+ // package main:
+ // func init func()
+ // var init$guard bool
+ // func main func()
+ // const message message = "Hello, World!":untyped string
+ //
+ // # Name: main.init
+ // # Package: main
+ // # Synthetic: package initializer
+ // func init():
+ // 0: entry P:0 S:2
+ // t0 = *init$guard bool
+ // if t0 goto 2 else 1
+ // 1: init.start P:1 S:1
+ // *init$guard = true:bool
+ // t1 = fmt.init() ()
+ // jump 2
+ // 2: init.done P:2 S:0
+ // return
+ //
+ // # Name: main.main
+ // # Package: main
+ // # Location: hello.go:8:6
+ // func main():
+ // 0: entry P:0 S:0
+ // t0 = new [1]interface{} (varargs) *[1]interface{}
+ // t1 = &t0[0:int] *interface{}
+ // t2 = make interface{} <- string ("Hello, World!":string) interface{}
+ // *t1 = t2
+ // t3 = slice t0[:] []interface{}
+ // t4 = fmt.Println(t3...) (n int, err error)
+ // return
+}
diff --git a/llgo/third_party/go.tools/go/ssa/func.go b/llgo/third_party/go.tools/go/ssa/func.go
new file mode 100644
index 0000000000000000000000000000000000000000..23eddf4711067208c451a00b204f1fc4f7245384
--- /dev/null
+++ b/llgo/third_party/go.tools/go/ssa/func.go
@@ -0,0 +1,687 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// This file implements the Function and BasicBlock types.
+
+import (
+ "bytes"
+ "fmt"
+ "go/ast"
+ "go/token"
+ "io"
+ "os"
+ "strings"
+
+ "llvm.org/llgo/third_party/go.tools/go/types"
+)
+
+// addEdge adds a control-flow graph edge from from to to.
+func addEdge(from, to *BasicBlock) {
+ from.Succs = append(from.Succs, to)
+ to.Preds = append(to.Preds, from)
+}
+
+// Parent returns the function that contains block b.
+func (b *BasicBlock) Parent() *Function { return b.parent }
+
+// String returns a human-readable label of this block.
+// It is not guaranteed unique within the function.
+//
+func (b *BasicBlock) String() string {
+ return fmt.Sprintf("%d", b.Index)
+}
+
+// emit appends an instruction to the current basic block.
+// If the instruction defines a Value, it is returned.
+//
+func (b *BasicBlock) emit(i Instruction) Value {
+ i.setBlock(b)
+ b.Instrs = append(b.Instrs, i)
+ v, _ := i.(Value)
+ return v
+}
+
+// predIndex returns the i such that b.Preds[i] == c or panics if
+// there is none.
+func (b *BasicBlock) predIndex(c *BasicBlock) int {
+ for i, pred := range b.Preds {
+ if pred == c {
+ return i
+ }
+ }
+ panic(fmt.Sprintf("no edge %s -> %s", c, b))
+}
+
+// hasPhi returns true if b.Instrs contains φ-nodes.
+func (b *BasicBlock) hasPhi() bool {
+ _, ok := b.Instrs[0].(*Phi)
+ return ok
+}
+
+// phis returns the prefix of b.Instrs containing all the block's φ-nodes.
+func (b *BasicBlock) phis() []Instruction {
+ for i, instr := range b.Instrs {
+ if _, ok := instr.(*Phi); !ok {
+ return b.Instrs[:i]
+ }
+ }
+ return nil // unreachable in well-formed blocks
+}
+
+// replacePred replaces all occurrences of p in b's predecessor list with q.
+// Ordinarily there should be at most one.
+//
+func (b *BasicBlock) replacePred(p, q *BasicBlock) {
+ for i, pred := range b.Preds {
+ if pred == p {
+ b.Preds[i] = q
+ }
+ }
+}
+
+// replaceSucc replaces all occurrences of p in b's successor list with q.
+// Ordinarily there should be at most one.
+//
+func (b *BasicBlock) replaceSucc(p, q *BasicBlock) {
+ for i, succ := range b.Succs {
+ if succ == p {
+ b.Succs[i] = q
+ }
+ }
+}
+
+// removePred removes all occurrences of p in b's
+// predecessor list and φ-nodes.
+// Ordinarily there should be at most one.
+//
+func (b *BasicBlock) removePred(p *BasicBlock) {
+ phis := b.phis()
+
+ // We must preserve edge order for φ-nodes.
+ j := 0
+ for i, pred := range b.Preds {
+ if pred != p {
+ b.Preds[j] = b.Preds[i]
+ // Strike out φ-edge too.
+ for _, instr := range phis {
+ phi := instr.(*Phi)
+ phi.Edges[j] = phi.Edges[i]
+ }
+ j++
+ }
+ }
+ // Nil out b.Preds[j:] and φ-edges[j:] to aid GC.
+ for i := j; i < len(b.Preds); i++ {
+ b.Preds[i] = nil
+ for _, instr := range phis {
+ instr.(*Phi).Edges[i] = nil
+ }
+ }
+ b.Preds = b.Preds[:j]
+ for _, instr := range phis {
+ phi := instr.(*Phi)
+ phi.Edges = phi.Edges[:j]
+ }
+}
+
+// Destinations associated with unlabelled for/switch/select stmts.
+// We push/pop one of these as we enter/leave each construct and for
+// each BranchStmt we scan for the innermost target of the right type.
+//
+type targets struct {
+ tail *targets // rest of stack
+ _break *BasicBlock
+ _continue *BasicBlock
+ _fallthrough *BasicBlock
+}
+
+// Destinations associated with a labelled block.
+// We populate these as labels are encountered in forward gotos or
+// labelled statements.
+//
+type lblock struct {
+ _goto *BasicBlock
+ _break *BasicBlock
+ _continue *BasicBlock
+}
+
+// labelledBlock returns the branch target associated with the
+// specified label, creating it if needed.
+//
+func (f *Function) labelledBlock(label *ast.Ident) *lblock {
+ lb := f.lblocks[label.Obj]
+ if lb == nil {
+ lb = &lblock{_goto: f.newBasicBlock(label.Name)}
+ if f.lblocks == nil {
+ f.lblocks = make(map[*ast.Object]*lblock)
+ }
+ f.lblocks[label.Obj] = lb
+ }
+ return lb
+}
+
+// addParam adds a (non-escaping) parameter to f.Params of the
+// specified name, type and source position.
+//
+func (f *Function) addParam(name string, typ types.Type, pos token.Pos) *Parameter {
+ v := &Parameter{
+ name: name,
+ typ: typ,
+ pos: pos,
+ parent: f,
+ }
+ f.Params = append(f.Params, v)
+ return v
+}
+
+func (f *Function) addParamObj(obj types.Object) *Parameter {
+ name := obj.Name()
+ if name == "" {
+ name = fmt.Sprintf("arg%d", len(f.Params))
+ }
+ param := f.addParam(name, obj.Type(), obj.Pos())
+ param.object = obj
+ return param
+}
+
+// addSpilledParam declares a parameter that is pre-spilled to the
+// stack; the function body will load/store the spilled location.
+// Subsequent lifting will eliminate spills where possible.
+//
+func (f *Function) addSpilledParam(obj types.Object) {
+ param := f.addParamObj(obj)
+ spill := &Alloc{Comment: obj.Name()}
+ spill.setType(types.NewPointer(obj.Type()))
+ spill.setPos(obj.Pos())
+ f.objects[obj] = spill
+ f.Locals = append(f.Locals, spill)
+ f.emit(spill)
+ f.emit(&Store{Addr: spill, Val: param})
+}
+
+// startBody initializes the function prior to generating SSA code for its body.
+// Precondition: f.Type() already set.
+//
+func (f *Function) startBody() {
+ f.currentBlock = f.newBasicBlock("entry")
+ f.objects = make(map[types.Object]Value) // needed for some synthetics, e.g. init
+}
+
+// createSyntacticParams populates f.Params and generates code (spills
+// and named result locals) for all the parameters declared in the
+// syntax. In addition it populates the f.objects mapping.
+//
+// Preconditions:
+// f.startBody() was called.
+// Postcondition:
+// len(f.Params) == len(f.Signature.Params) + (f.Signature.Recv() ? 1 : 0)
+//
+func (f *Function) createSyntacticParams(recv *ast.FieldList, functype *ast.FuncType) {
+ // Receiver (at most one inner iteration).
+ if recv != nil {
+ for _, field := range recv.List {
+ for _, n := range field.Names {
+ f.addSpilledParam(f.Pkg.info.Defs[n])
+ }
+ // Anonymous receiver? No need to spill.
+ if field.Names == nil {
+ f.addParamObj(f.Signature.Recv())
+ }
+ }
+ }
+
+ // Parameters.
+ if functype.Params != nil {
+ n := len(f.Params) // 1 if has recv, 0 otherwise
+ for _, field := range functype.Params.List {
+ for _, n := range field.Names {
+ f.addSpilledParam(f.Pkg.info.Defs[n])
+ }
+ // Anonymous parameter? No need to spill.
+ if field.Names == nil {
+ f.addParamObj(f.Signature.Params().At(len(f.Params) - n))
+ }
+ }
+ }
+
+ // Named results.
+ if functype.Results != nil {
+ for _, field := range functype.Results.List {
+ // Implicit "var" decl of locals for named results.
+ for _, n := range field.Names {
+ f.namedResults = append(f.namedResults, f.addLocalForIdent(n))
+ }
+ }
+ }
+}
+
+// numberRegisters assigns numbers to all SSA registers
+// (value-defining Instructions) in f, to aid debugging.
+// (Non-Instruction Values are named at construction.)
+//
+func numberRegisters(f *Function) {
+ v := 0
+ for _, b := range f.Blocks {
+ for _, instr := range b.Instrs {
+ switch instr.(type) {
+ case Value:
+ instr.(interface {
+ setNum(int)
+ }).setNum(v)
+ v++
+ }
+ }
+ }
+}
+
+// buildReferrers populates the def/use information in all non-nil
+// Value.Referrers slices.
+// Precondition: all such slices are initially empty.
+func buildReferrers(f *Function) {
+ var rands []*Value
+ for _, b := range f.Blocks {
+ for _, instr := range b.Instrs {
+ rands = instr.Operands(rands[:0]) // recycle storage
+ for _, rand := range rands {
+ if r := *rand; r != nil {
+ if ref := r.Referrers(); ref != nil {
+ *ref = append(*ref, instr)
+ }
+ }
+ }
+ }
+ }
+}
+
+// finishBody() finalizes the function after SSA code generation of its body.
+func (f *Function) finishBody() {
+ f.objects = nil
+ f.currentBlock = nil
+ f.lblocks = nil
+
+ // Don't pin the AST in memory (except in debug mode).
+ if n := f.syntax; n != nil && !f.debugInfo() {
+ f.syntax = extentNode{n.Pos(), n.End()}
+ }
+
+ // Remove from f.Locals any Allocs that escape to the heap.
+ j := 0
+ for _, l := range f.Locals {
+ if !l.Heap {
+ f.Locals[j] = l
+ j++
+ }
+ }
+ // Nil out f.Locals[j:] to aid GC.
+ for i := j; i < len(f.Locals); i++ {
+ f.Locals[i] = nil
+ }
+ f.Locals = f.Locals[:j]
+
+ optimizeBlocks(f)
+
+ buildReferrers(f)
+
+ buildDomTree(f)
+
+ if f.Prog.mode&NaiveForm == 0 {
+ // For debugging pre-state of lifting pass:
+ // numberRegisters(f)
+ // f.WriteTo(os.Stderr)
+ lift(f)
+ }
+
+ f.namedResults = nil // (used by lifting)
+
+ numberRegisters(f)
+
+ if f.Prog.mode&PrintFunctions != 0 {
+ printMu.Lock()
+ f.WriteTo(os.Stdout)
+ printMu.Unlock()
+ }
+
+ if f.Prog.mode&SanityCheckFunctions != 0 {
+ mustSanityCheck(f, nil)
+ }
+}
+
+// removeNilBlocks eliminates nils from f.Blocks and updates each
+// BasicBlock.Index. Use this after any pass that may delete blocks.
+//
+func (f *Function) removeNilBlocks() {
+ j := 0
+ for _, b := range f.Blocks {
+ if b != nil {
+ b.Index = j
+ f.Blocks[j] = b
+ j++
+ }
+ }
+ // Nil out f.Blocks[j:] to aid GC.
+ for i := j; i < len(f.Blocks); i++ {
+ f.Blocks[i] = nil
+ }
+ f.Blocks = f.Blocks[:j]
+}
+
+// SetDebugMode sets the debug mode for package pkg. If true, all its
+// functions will include full debug info. This greatly increases the
+// size of the instruction stream, and causes Functions to depend upon
+// the ASTs, potentially keeping them live in memory for longer.
+//
+func (pkg *Package) SetDebugMode(debug bool) {
+ // TODO(adonovan): do we want ast.File granularity?
+ pkg.debug = debug
+}
+
+// debugInfo reports whether debug info is wanted for this function.
+func (f *Function) debugInfo() bool {
+ return f.Pkg != nil && f.Pkg.debug
+}
+
+// addNamedLocal creates a local variable, adds it to function f and
+// returns it. Its name and type are taken from obj. Subsequent
+// calls to f.lookup(obj) will return the same local.
+//
+func (f *Function) addNamedLocal(obj types.Object) *Alloc {
+ l := f.addLocal(obj.Type(), obj.Pos())
+ l.Comment = obj.Name()
+ f.objects[obj] = l
+ return l
+}
+
+func (f *Function) addLocalForIdent(id *ast.Ident) *Alloc {
+ return f.addNamedLocal(f.Pkg.info.Defs[id])
+}
+
+// addLocal creates an anonymous local variable of type typ, adds it
+// to function f and returns it. pos is the optional source location.
+//
+func (f *Function) addLocal(typ types.Type, pos token.Pos) *Alloc {
+ v := &Alloc{}
+ v.setType(types.NewPointer(typ))
+ v.setPos(pos)
+ f.Locals = append(f.Locals, v)
+ f.emit(v)
+ return v
+}
+
+// lookup returns the address of the named variable identified by obj
+// that is local to function f or one of its enclosing functions.
+// If escaping, the reference comes from a potentially escaping pointer
+// expression and the referent must be heap-allocated.
+//
+func (f *Function) lookup(obj types.Object, escaping bool) Value {
+ if v, ok := f.objects[obj]; ok {
+ if alloc, ok := v.(*Alloc); ok && escaping {
+ alloc.Heap = true
+ }
+ return v // function-local var (address)
+ }
+
+ // Definition must be in an enclosing function;
+ // plumb it through intervening closures.
+ if f.parent == nil {
+ panic("no Value for type.Object " + obj.Name())
+ }
+ outer := f.parent.lookup(obj, true) // escaping
+ v := &FreeVar{
+ name: obj.Name(),
+ typ: outer.Type(),
+ pos: outer.Pos(),
+ outer: outer,
+ parent: f,
+ }
+ f.objects[obj] = v
+ f.FreeVars = append(f.FreeVars, v)
+ return v
+}
+
+// emit emits the specified instruction to function f.
+func (f *Function) emit(instr Instruction) Value {
+ return f.currentBlock.emit(instr)
+}
+
+// RelString returns the full name of this function, qualified by
+// package name, receiver type, etc.
+//
+// The specific formatting rules are not guaranteed and may change.
+//
+// Examples:
+// "math.IsNaN" // a package-level function
+// "(*bytes.Buffer).Bytes" // a declared method or a wrapper
+// "(*bytes.Buffer).Bytes$thunk" // thunk (func wrapping method; receiver is param 0)
+// "(*bytes.Buffer).Bytes$bound" // bound (func wrapping method; receiver supplied by closure)
+// "main.main$1" // an anonymous function in main
+// "main.init#1" // a declared init function
+// "main.init" // the synthesized package initializer
+//
+// When these functions are referred to from within the same package
+// (i.e. from == f.Pkg.Object), they are rendered without the package path.
+// For example: "IsNaN", "(*Buffer).Bytes", etc.
+//
+// Invariant: all non-synthetic functions have distinct package-qualified names.
+//
+func (f *Function) RelString(from *types.Package) string {
+ // Anonymous?
+ if f.parent != nil {
+ // An anonymous function's Name() looks like "parentName$1",
+ // but its String() should include the type/package/etc.
+ parent := f.parent.RelString(from)
+ for i, anon := range f.parent.AnonFuncs {
+ if anon == f {
+ return fmt.Sprintf("%s$%d", parent, 1+i)
+ }
+ }
+
+ return f.name // should never happen
+ }
+
+ // Method (declared or wrapper)?
+ if recv := f.Signature.Recv(); recv != nil {
+ return f.relMethod(from, recv.Type())
+ }
+
+ // Thunk?
+ if f.method != nil {
+ return f.relMethod(from, f.method.Recv())
+ }
+
+ // Bound?
+ if len(f.FreeVars) == 1 && strings.HasSuffix(f.name, "$bound") {
+ return f.relMethod(from, f.FreeVars[0].Type())
+ }
+
+ // Package-level function?
+ // Prefix with package name for cross-package references only.
+ if p := f.pkgobj(); p != nil && p != from {
+ return fmt.Sprintf("%s.%s", p.Path(), f.name)
+ }
+
+ // Unknown.
+ return f.name
+}
+
+func (f *Function) relMethod(from *types.Package, recv types.Type) string {
+ return fmt.Sprintf("(%s).%s", relType(recv, from), f.name)
+}
+
+// writeSignature writes to buf the signature sig in declaration syntax.
+func writeSignature(buf *bytes.Buffer, from *types.Package, name string, sig *types.Signature, params []*Parameter) {
+ buf.WriteString("func ")
+ if recv := sig.Recv(); recv != nil {
+ buf.WriteString("(")
+ if n := params[0].Name(); n != "" {
+ buf.WriteString(n)
+ buf.WriteString(" ")
+ }
+ types.WriteType(buf, from, params[0].Type())
+ buf.WriteString(") ")
+ }
+ buf.WriteString(name)
+ types.WriteSignature(buf, from, sig)
+}
+
+func (f *Function) pkgobj() *types.Package {
+ if f.Pkg != nil {
+ return f.Pkg.Object
+ }
+ return nil
+}
+
+var _ io.WriterTo = (*Function)(nil) // *Function implements io.WriterTo
+
+func (f *Function) WriteTo(w io.Writer) (int64, error) {
+ var buf bytes.Buffer
+ WriteFunction(&buf, f)
+ n, err := w.Write(buf.Bytes())
+ return int64(n), err
+}
+
+// WriteFunction writes to buf a human-readable "disassembly" of f.
+func WriteFunction(buf *bytes.Buffer, f *Function) {
+ fmt.Fprintf(buf, "# Name: %s\n", f.String())
+ if f.Pkg != nil {
+ fmt.Fprintf(buf, "# Package: %s\n", f.Pkg.Object.Path())
+ }
+ if syn := f.Synthetic; syn != "" {
+ fmt.Fprintln(buf, "# Synthetic:", syn)
+ }
+ if pos := f.Pos(); pos.IsValid() {
+ fmt.Fprintf(buf, "# Location: %s\n", f.Prog.Fset.Position(pos))
+ }
+
+ if f.parent != nil {
+ fmt.Fprintf(buf, "# Parent: %s\n", f.parent.Name())
+ }
+
+ if f.Recover != nil {
+ fmt.Fprintf(buf, "# Recover: %s\n", f.Recover)
+ }
+
+ from := f.pkgobj()
+
+ if f.FreeVars != nil {
+ buf.WriteString("# Free variables:\n")
+ for i, fv := range f.FreeVars {
+ fmt.Fprintf(buf, "# % 3d:\t%s %s\n", i, fv.Name(), relType(fv.Type(), from))
+ }
+ }
+
+ if len(f.Locals) > 0 {
+ buf.WriteString("# Locals:\n")
+ for i, l := range f.Locals {
+ fmt.Fprintf(buf, "# % 3d:\t%s %s\n", i, l.Name(), relType(deref(l.Type()), from))
+ }
+ }
+ writeSignature(buf, from, f.Name(), f.Signature, f.Params)
+ buf.WriteString(":\n")
+
+ if f.Blocks == nil {
+ buf.WriteString("\t(external)\n")
+ }
+
+ // NB. column calculations are confused by non-ASCII
+ // characters and assume 8-space tabs.
+ const punchcard = 80 // for old time's sake.
+ const tabwidth = 8
+ for _, b := range f.Blocks {
+ if b == nil {
+ // Corrupt CFG.
+ fmt.Fprintf(buf, ".nil:\n")
+ continue
+ }
+ n, _ := fmt.Fprintf(buf, "%d:", b.Index)
+ bmsg := fmt.Sprintf("%s P:%d S:%d", b.Comment, len(b.Preds), len(b.Succs))
+ fmt.Fprintf(buf, "%*s%s\n", punchcard-1-n-len(bmsg), "", bmsg)
+
+ if false { // CFG debugging
+ fmt.Fprintf(buf, "\t# CFG: %s --> %s --> %s\n", b.Preds, b, b.Succs)
+ }
+ for _, instr := range b.Instrs {
+ buf.WriteString("\t")
+ switch v := instr.(type) {
+ case Value:
+ l := punchcard - tabwidth
+ // Left-align the instruction.
+ if name := v.Name(); name != "" {
+ n, _ := fmt.Fprintf(buf, "%s = ", name)
+ l -= n
+ }
+ n, _ := buf.WriteString(instr.String())
+ l -= n
+ // Right-align the type if there's space.
+ if t := v.Type(); t != nil {
+ buf.WriteByte(' ')
+ ts := relType(t, from)
+ l -= len(ts) + len(" ") // (spaces before and after type)
+ if l > 0 {
+ fmt.Fprintf(buf, "%*s", l, "")
+ }
+ buf.WriteString(ts)
+ }
+ case nil:
+ // Be robust against bad transforms.
+ buf.WriteString("<deleted>")
+ default:
+ buf.WriteString(instr.String())
+ }
+ buf.WriteString("\n")
+ }
+ }
+ fmt.Fprintf(buf, "\n")
+}
+
+// newBasicBlock adds to f a new basic block and returns it. It does
+// not automatically become the current block for subsequent calls to emit.
+// comment is an optional string for more readable debugging output.
+//
+func (f *Function) newBasicBlock(comment string) *BasicBlock {
+ b := &BasicBlock{
+ Index: len(f.Blocks),
+ Comment: comment,
+ parent: f,
+ }
+ b.Succs = b.succs2[:0]
+ f.Blocks = append(f.Blocks, b)
+ return b
+}
+
+// NewFunction returns a new synthetic Function instance belonging to
+// prog, with its name and signature fields set as specified.
+//
+// The caller is responsible for initializing the remaining fields of
+// the function object, e.g. Pkg, Params, Blocks.
+//
+// It is practically impossible for clients to construct well-formed
+// SSA functions/packages/programs directly, so we assume this is the
+// job of the Builder alone. NewFunction exists to provide clients a
+// little flexibility. For example, analysis tools may wish to
+// construct fake Functions for the root of the callgraph, a fake
+// "reflect" package, etc.
+//
+// TODO(adonovan): think harder about the API here.
+//
+func (prog *Program) NewFunction(name string, sig *types.Signature, provenance string) *Function {
+ return &Function{Prog: prog, name: name, Signature: sig, Synthetic: provenance}
+}
+
+type extentNode [2]token.Pos
+
+func (n extentNode) Pos() token.Pos { return n[0] }
+func (n extentNode) End() token.Pos { return n[1] }
+
+// Syntax returns an ast.Node whose Pos/End methods provide the
+// lexical extent of the function if it was defined by Go source code
+// (f.Synthetic==""), or nil otherwise.
+//
+// If f was built with debug information (see Package.SetDebugMode),
+// the result is the *ast.FuncDecl or *ast.FuncLit that declared the
+// function. Otherwise, it is an opaque Node providing only position
+// information; this avoids pinning the AST in memory.
+//
+func (f *Function) Syntax() ast.Node { return f.syntax }
diff --git a/llgo/third_party/go.tools/go/ssa/interp/external.go b/llgo/third_party/go.tools/go/ssa/interp/external.go
new file mode 100644
index 0000000000000000000000000000000000000000..4f8a6ab938165781e935c80beab3878b388de3fd
--- /dev/null
+++ b/llgo/third_party/go.tools/go/ssa/interp/external.go
@@ -0,0 +1,482 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package interp
+
+// Emulated functions that we cannot interpret because they are
+// external or because they use "unsafe" or "reflect" operations.
+
+import (
+ "math"
+ "os"
+ "runtime"
+ "syscall"
+ "time"
+ "unsafe"
+
+ "llvm.org/llgo/third_party/go.tools/go/ssa"
+ "llvm.org/llgo/third_party/go.tools/go/types"
+)
+
+type externalFn func(fr *frame, args []value) value
+
+// TODO(adonovan): fix: reflect.Value abstracts an lvalue or an
+// rvalue; Set() causes mutations that can be observed via aliases.
+// We have not captured that correctly here.
+
+// Key strings are from Function.String().
+var externals map[string]externalFn
+
+func init() {
+ // That little dot ۰ is an Arabic zero numeral (U+06F0), categories [Nd].
+ externals = map[string]externalFn{
+ "(*sync.Pool).Get": ext۰sync۰Pool۰Get,
+ "(*sync.Pool).Put": ext۰sync۰Pool۰Put,
+ "(reflect.Value).Bool": ext۰reflect۰Value۰Bool,
+ "(reflect.Value).CanAddr": ext۰reflect۰Value۰CanAddr,
+ "(reflect.Value).CanInterface": ext۰reflect۰Value۰CanInterface,
+ "(reflect.Value).Elem": ext۰reflect۰Value۰Elem,
+ "(reflect.Value).Field": ext۰reflect۰Value۰Field,
+ "(reflect.Value).Float": ext۰reflect۰Value۰Float,
+ "(reflect.Value).Index": ext۰reflect۰Value۰Index,
+ "(reflect.Value).Int": ext۰reflect۰Value۰Int,
+ "(reflect.Value).Interface": ext۰reflect۰Value۰Interface,
+ "(reflect.Value).IsNil": ext۰reflect۰Value۰IsNil,
+ "(reflect.Value).IsValid": ext۰reflect۰Value۰IsValid,
+ "(reflect.Value).Kind": ext۰reflect۰Value۰Kind,
+ "(reflect.Value).Len": ext۰reflect۰Value۰Len,
+ "(reflect.Value).MapIndex": ext۰reflect۰Value۰MapIndex,
+ "(reflect.Value).MapKeys": ext۰reflect۰Value۰MapKeys,
+ "(reflect.Value).NumField": ext۰reflect۰Value۰NumField,
+ "(reflect.Value).NumMethod": ext۰reflect۰Value۰NumMethod,
+ "(reflect.Value).Pointer": ext۰reflect۰Value۰Pointer,
+ "(reflect.Value).Set": ext۰reflect۰Value۰Set,
+ "(reflect.Value).String": ext۰reflect۰Value۰String,
+ "(reflect.Value).Type": ext۰reflect۰Value۰Type,
+ "(reflect.Value).Uint": ext۰reflect۰Value۰Uint,
+ "(reflect.error).Error": ext۰reflect۰error۰Error,
+ "(reflect.rtype).Bits": ext۰reflect۰rtype۰Bits,
+ "(reflect.rtype).Elem": ext۰reflect۰rtype۰Elem,
+ "(reflect.rtype).Field": ext۰reflect۰rtype۰Field,
+ "(reflect.rtype).Kind": ext۰reflect۰rtype۰Kind,
+ "(reflect.rtype).NumField": ext۰reflect۰rtype۰NumField,
+ "(reflect.rtype).NumMethod": ext۰reflect۰rtype۰NumMethod,
+ "(reflect.rtype).NumOut": ext۰reflect۰rtype۰NumOut,
+ "(reflect.rtype).Out": ext۰reflect۰rtype۰Out,
+ "(reflect.rtype).Size": ext۰reflect۰rtype۰Size,
+ "(reflect.rtype).String": ext۰reflect۰rtype۰String,
+ "bytes.Equal": ext۰bytes۰Equal,
+ "bytes.IndexByte": ext۰bytes۰IndexByte,
+ "hash/crc32.haveSSE42": ext۰crc32۰haveSSE42,
+ "math.Abs": ext۰math۰Abs,
+ "math.Exp": ext۰math۰Exp,
+ "math.Float32bits": ext۰math۰Float32bits,
+ "math.Float32frombits": ext۰math۰Float32frombits,
+ "math.Float64bits": ext۰math۰Float64bits,
+ "math.Float64frombits": ext۰math۰Float64frombits,
+ "math.Ldexp": ext۰math۰Ldexp,
+ "math.Log": ext۰math۰Log,
+ "math.Min": ext۰math۰Min,
+ "os.runtime_args": ext۰os۰runtime_args,
+ "reflect.New": ext۰reflect۰New,
+ "reflect.TypeOf": ext۰reflect۰TypeOf,
+ "reflect.ValueOf": ext۰reflect۰ValueOf,
+ "reflect.init": ext۰reflect۰Init,
+ "reflect.valueInterface": ext۰reflect۰valueInterface,
+ "runtime.Breakpoint": ext۰runtime۰Breakpoint,
+ "runtime.Caller": ext۰runtime۰Caller,
+ "runtime.Callers": ext۰runtime۰Callers,
+ "runtime.FuncForPC": ext۰runtime۰FuncForPC,
+ "runtime.GC": ext۰runtime۰GC,
+ "runtime.GOMAXPROCS": ext۰runtime۰GOMAXPROCS,
+ "runtime.Goexit": ext۰runtime۰Goexit,
+ "runtime.Gosched": ext۰runtime۰Gosched,
+ "runtime.init": ext۰runtime۰init,
+ "runtime.NumCPU": ext۰runtime۰NumCPU,
+ "runtime.ReadMemStats": ext۰runtime۰ReadMemStats,
+ "runtime.SetFinalizer": ext۰runtime۰SetFinalizer,
+ "(*runtime.Func).Entry": ext۰runtime۰Func۰Entry,
+ "(*runtime.Func).FileLine": ext۰runtime۰Func۰FileLine,
+ "(*runtime.Func).Name": ext۰runtime۰Func۰Name,
+ "runtime.environ": ext۰runtime۰environ,
+ "runtime.getgoroot": ext۰runtime۰getgoroot,
+ "strings.IndexByte": ext۰strings۰IndexByte,
+ "sync.runtime_Semacquire": ext۰sync۰runtime_Semacquire,
+ "sync.runtime_Semrelease": ext۰sync۰runtime_Semrelease,
+ "sync.runtime_Syncsemcheck": ext۰sync۰runtime_Syncsemcheck,
+ "sync.runtime_registerPoolCleanup": ext۰sync۰runtime_registerPoolCleanup,
+ "sync/atomic.AddInt32": ext۰atomic۰AddInt32,
+ "sync/atomic.AddUint32": ext۰atomic۰AddUint32,
+ "sync/atomic.AddUint64": ext۰atomic۰AddUint64,
+ "sync/atomic.CompareAndSwapInt32": ext۰atomic۰CompareAndSwapInt32,
+ "sync/atomic.LoadInt32": ext۰atomic۰LoadInt32,
+ "sync/atomic.LoadUint32": ext۰atomic۰LoadUint32,
+ "sync/atomic.StoreInt32": ext۰atomic۰StoreInt32,
+ "sync/atomic.StoreUint32": ext۰atomic۰StoreUint32,
+ "syscall.Close": ext۰syscall۰Close,
+ "syscall.Exit": ext۰syscall۰Exit,
+ "syscall.Fstat": ext۰syscall۰Fstat,
+ "syscall.Getpid": ext۰syscall۰Getpid,
+ "syscall.Getwd": ext۰syscall۰Getwd,
+ "syscall.Kill": ext۰syscall۰Kill,
+ "syscall.Lstat": ext۰syscall۰Lstat,
+ "syscall.Open": ext۰syscall۰Open,
+ "syscall.ParseDirent": ext۰syscall۰ParseDirent,
+ "syscall.RawSyscall": ext۰syscall۰RawSyscall,
+ "syscall.Read": ext۰syscall۰Read,
+ "syscall.ReadDirent": ext۰syscall۰ReadDirent,
+ "syscall.Stat": ext۰syscall۰Stat,
+ "syscall.Write": ext۰syscall۰Write,
+ "syscall.runtime_envs": ext۰runtime۰environ,
+ "time.Sleep": ext۰time۰Sleep,
+ "time.now": ext۰time۰now,
+ }
+}
+
+// wrapError returns an interpreted 'error' interface value for err.
+func wrapError(err error) value {
+ if err == nil {
+ return iface{}
+ }
+ return iface{t: errorType, v: err.Error()}
+}
+
+func ext۰sync۰Pool۰Get(fr *frame, args []value) value {
+ Pool := fr.i.prog.ImportedPackage("sync").Type("Pool").Object()
+ _, newIndex, _ := types.LookupFieldOrMethod(Pool.Type(), false, Pool.Pkg(), "New")
+
+ if New := (*args[0].(*value)).(structure)[newIndex[0]]; New != nil {
+ return call(fr.i, fr, 0, New, nil)
+ }
+ return nil
+}
+
+func ext۰sync۰Pool۰Put(fr *frame, args []value) value {
+ return nil
+}
+
+func ext۰bytes۰Equal(fr *frame, args []value) value {
+ // func Equal(a, b []byte) bool
+ a := args[0].([]value)
+ b := args[1].([]value)
+ if len(a) != len(b) {
+ return false
+ }
+ for i := range a {
+ if a[i] != b[i] {
+ return false
+ }
+ }
+ return true
+}
+
+func ext۰bytes۰IndexByte(fr *frame, args []value) value {
+ // func IndexByte(s []byte, c byte) int
+ s := args[0].([]value)
+ c := args[1].(byte)
+ for i, b := range s {
+ if b.(byte) == c {
+ return i
+ }
+ }
+ return -1
+}
+
+func ext۰crc32۰haveSSE42(fr *frame, args []value) value {
+ return false
+}
+
+func ext۰math۰Float64frombits(fr *frame, args []value) value {
+ return math.Float64frombits(args[0].(uint64))
+}
+
+func ext۰math۰Float64bits(fr *frame, args []value) value {
+ return math.Float64bits(args[0].(float64))
+}
+
+func ext۰math۰Float32frombits(fr *frame, args []value) value {
+ return math.Float32frombits(args[0].(uint32))
+}
+
+func ext۰math۰Abs(fr *frame, args []value) value {
+ return math.Abs(args[0].(float64))
+}
+
+func ext۰math۰Exp(fr *frame, args []value) value {
+ return math.Exp(args[0].(float64))
+}
+
+func ext۰math۰Float32bits(fr *frame, args []value) value {
+ return math.Float32bits(args[0].(float32))
+}
+
+func ext۰math۰Min(fr *frame, args []value) value {
+ return math.Min(args[0].(float64), args[1].(float64))
+}
+
+func ext۰math۰Ldexp(fr *frame, args []value) value {
+ return math.Ldexp(args[0].(float64), args[1].(int))
+}
+
+func ext۰math۰Log(fr *frame, args []value) value {
+ return math.Log(args[0].(float64))
+}
+
+func ext۰os۰runtime_args(fr *frame, args []value) value {
+ return fr.i.osArgs
+}
+
+func ext۰runtime۰Breakpoint(fr *frame, args []value) value {
+ runtime.Breakpoint()
+ return nil
+}
+
+func ext۰runtime۰Caller(fr *frame, args []value) value {
+ // func Caller(skip int) (pc uintptr, file string, line int, ok bool)
+ skip := 1 + args[0].(int)
+ for i := 0; i < skip; i++ {
+ if fr != nil {
+ fr = fr.caller
+ }
+ }
+ var pc uintptr
+ var file string
+ var line int
+ var ok bool
+ if fr != nil {
+ fn := fr.fn
+ // TODO(adonovan): use pc/posn of current instruction, not start of fn.
+ pc = uintptr(unsafe.Pointer(fn))
+ posn := fn.Prog.Fset.Position(fn.Pos())
+ file = posn.Filename
+ line = posn.Line
+ ok = true
+ }
+ return tuple{pc, file, line, ok}
+}
+
+func ext۰runtime۰Callers(fr *frame, args []value) value {
+ // Callers(skip int, pc []uintptr) int
+ skip := args[0].(int)
+ pc := args[1].([]value)
+ for i := 0; i < skip; i++ {
+ if fr != nil {
+ fr = fr.caller
+ }
+ }
+ i := 0
+ for fr != nil {
+ pc[i] = uintptr(unsafe.Pointer(fr.fn))
+ i++
+ fr = fr.caller
+ }
+ return i
+}
+
+func ext۰runtime۰FuncForPC(fr *frame, args []value) value {
+ // FuncForPC(pc uintptr) *Func
+ pc := args[0].(uintptr)
+ var fn *ssa.Function
+ if pc != 0 {
+ fn = (*ssa.Function)(unsafe.Pointer(pc)) // indeed unsafe!
+ }
+ var Func value
+ Func = structure{fn} // a runtime.Func
+ return &Func
+}
+
+func ext۰runtime۰environ(fr *frame, args []value) value {
+ // This function also implements syscall.runtime_envs.
+ return environ
+}
+
+func ext۰runtime۰getgoroot(fr *frame, args []value) value {
+ return os.Getenv("GOROOT")
+}
+
+func ext۰strings۰IndexByte(fr *frame, args []value) value {
+ // func IndexByte(s string, c byte) int
+ s := args[0].(string)
+ c := args[1].(byte)
+ for i := 0; i < len(s); i++ {
+ if s[i] == c {
+ return i
+ }
+ }
+ return -1
+}
+
+func ext۰sync۰runtime_Syncsemcheck(fr *frame, args []value) value {
+ // TODO(adonovan): fix: implement.
+ return nil
+}
+
+func ext۰sync۰runtime_registerPoolCleanup(fr *frame, args []value) value {
+ return nil
+}
+
+func ext۰sync۰runtime_Semacquire(fr *frame, args []value) value {
+ // TODO(adonovan): fix: implement.
+ return nil
+}
+
+func ext۰sync۰runtime_Semrelease(fr *frame, args []value) value {
+ // TODO(adonovan): fix: implement.
+ return nil
+}
+
+func ext۰runtime۰GOMAXPROCS(fr *frame, args []value) value {
+ return runtime.GOMAXPROCS(args[0].(int))
+}
+
+func ext۰runtime۰Goexit(fr *frame, args []value) value {
+ // TODO(adonovan): don't kill the interpreter's main goroutine.
+ runtime.Goexit()
+ return nil
+}
+
+func ext۰runtime۰GC(fr *frame, args []value) value {
+ runtime.GC()
+ return nil
+}
+
+func ext۰runtime۰Gosched(fr *frame, args []value) value {
+ runtime.Gosched()
+ return nil
+}
+
+func ext۰runtime۰init(fr *frame, args []value) value {
+ return nil
+}
+
+func ext۰runtime۰NumCPU(fr *frame, args []value) value {
+ return runtime.NumCPU()
+}
+
+func ext۰runtime۰ReadMemStats(fr *frame, args []value) value {
+ // TODO(adonovan): populate args[0].(Struct)
+ return nil
+}
+
+func ext۰atomic۰LoadUint32(fr *frame, args []value) value {
+ // TODO(adonovan): fix: not atomic!
+ return (*args[0].(*value)).(uint32)
+}
+
+func ext۰atomic۰StoreUint32(fr *frame, args []value) value {
+ // TODO(adonovan): fix: not atomic!
+ *args[0].(*value) = args[1].(uint32)
+ return nil
+}
+
+func ext۰atomic۰LoadInt32(fr *frame, args []value) value {
+ // TODO(adonovan): fix: not atomic!
+ return (*args[0].(*value)).(int32)
+}
+
+func ext۰atomic۰StoreInt32(fr *frame, args []value) value {
+ // TODO(adonovan): fix: not atomic!
+ *args[0].(*value) = args[1].(int32)
+ return nil
+}
+
+func ext۰atomic۰CompareAndSwapInt32(fr *frame, args []value) value {
+ // TODO(adonovan): fix: not atomic!
+ p := args[0].(*value)
+ if (*p).(int32) == args[1].(int32) {
+ *p = args[2].(int32)
+ return true
+ }
+ return false
+}
+
+func ext۰atomic۰AddInt32(fr *frame, args []value) value {
+ // TODO(adonovan): fix: not atomic!
+ p := args[0].(*value)
+ newv := (*p).(int32) + args[1].(int32)
+ *p = newv
+ return newv
+}
+
+func ext۰atomic۰AddUint32(fr *frame, args []value) value {
+ // TODO(adonovan): fix: not atomic!
+ p := args[0].(*value)
+ newv := (*p).(uint32) + args[1].(uint32)
+ *p = newv
+ return newv
+}
+
+func ext۰atomic۰AddUint64(fr *frame, args []value) value {
+ // TODO(adonovan): fix: not atomic!
+ p := args[0].(*value)
+ newv := (*p).(uint64) + args[1].(uint64)
+ *p = newv
+ return newv
+}
+
+func ext۰runtime۰SetFinalizer(fr *frame, args []value) value {
+ return nil // ignore
+}
+
+// Pretend: type runtime.Func struct { entry *ssa.Function }
+
+func ext۰runtime۰Func۰FileLine(fr *frame, args []value) value {
+ // func (*runtime.Func) FileLine(uintptr) (string, int)
+ f, _ := (*args[0].(*value)).(structure)[0].(*ssa.Function)
+ pc := args[1].(uintptr)
+ _ = pc
+ if f != nil {
+ // TODO(adonovan): use position of current instruction, not fn.
+ posn := f.Prog.Fset.Position(f.Pos())
+ return tuple{posn.Filename, posn.Line}
+ }
+ return tuple{"", 0}
+}
+
+func ext۰runtime۰Func۰Name(fr *frame, args []value) value {
+ // func (*runtime.Func) Name() string
+ f, _ := (*args[0].(*value)).(structure)[0].(*ssa.Function)
+ if f != nil {
+ return f.String()
+ }
+ return ""
+}
+
+func ext۰runtime۰Func۰Entry(fr *frame, args []value) value {
+ // func (*runtime.Func) Entry() uintptr
+ f, _ := (*args[0].(*value)).(structure)[0].(*ssa.Function)
+ return uintptr(unsafe.Pointer(f))
+}
+
+func ext۰time۰now(fr *frame, args []value) value {
+ nano := time.Now().UnixNano()
+ return tuple{int64(nano / 1e9), int32(nano % 1e9)}
+}
+
+func ext۰time۰Sleep(fr *frame, args []value) value {
+ time.Sleep(time.Duration(args[0].(int64)))
+ return nil
+}
+
+func ext۰syscall۰Exit(fr *frame, args []value) value {
+ panic(exitPanic(args[0].(int)))
+}
+
+func ext۰syscall۰Getwd(fr *frame, args []value) value {
+ s, err := syscall.Getwd()
+ return tuple{s, wrapError(err)}
+}
+
+func ext۰syscall۰Getpid(fr *frame, args []value) value {
+ return syscall.Getpid()
+}
+
+func valueToBytes(v value) []byte {
+ in := v.([]value)
+ b := make([]byte, len(in))
+ for i := range in {
+ b[i] = in[i].(byte)
+ }
+ return b
+}
diff --git a/llgo/third_party/go.tools/go/ssa/interp/external_darwin.go b/llgo/third_party/go.tools/go/ssa/interp/external_darwin.go
new file mode 100644
index 0000000000000000000000000000000000000000..4974ad60169ac0d945b4612e15eb8ab2efc7301c
--- /dev/null
+++ b/llgo/third_party/go.tools/go/ssa/interp/external_darwin.go
@@ -0,0 +1,18 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin
+
+package interp
+
+import "syscall"
+
+func init() {
+ externals["syscall.Sysctl"] = ext۰syscall۰Sysctl
+}
+
+func ext۰syscall۰Sysctl(fr *frame, args []value) value {
+ r, err := syscall.Sysctl(args[0].(string))
+ return tuple{r, wrapError(err)}
+}
diff --git a/llgo/third_party/go.tools/go/ssa/interp/external_freebsd.go b/llgo/third_party/go.tools/go/ssa/interp/external_freebsd.go
new file mode 100644
index 0000000000000000000000000000000000000000..52033038231a14d86c6e5c7d611767eda66a1d23
--- /dev/null
+++ b/llgo/third_party/go.tools/go/ssa/interp/external_freebsd.go
@@ -0,0 +1,24 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build freebsd
+
+package interp
+
+import "syscall"
+
+func init() {
+ externals["syscall.Sysctl"] = ext۰syscall۰Sysctl
+ externals["syscall.SysctlUint32"] = ext۰syscall۰SysctlUint32
+}
+
+func ext۰syscall۰Sysctl(fr *frame, args []value) value {
+ r, err := syscall.Sysctl(args[0].(string))
+ return tuple{r, wrapError(err)}
+}
+
+func ext۰syscall۰SysctlUint32(fr *frame, args []value) value {
+ r, err := syscall.SysctlUint32(args[0].(string))
+ return tuple{r, wrapError(err)}
+}
diff --git a/llgo/third_party/go.tools/go/ssa/interp/external_plan9.go b/llgo/third_party/go.tools/go/ssa/interp/external_plan9.go
new file mode 100644
index 0000000000000000000000000000000000000000..05d02d56e130d1d63cf80f71e5f7468bd747e290
--- /dev/null
+++ b/llgo/third_party/go.tools/go/ssa/interp/external_plan9.go
@@ -0,0 +1,47 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package interp
+
+import "syscall"
+
+func ext۰syscall۰Close(fr *frame, args []value) value {
+ panic("syscall.Close not yet implemented")
+}
+func ext۰syscall۰Fstat(fr *frame, args []value) value {
+ panic("syscall.Fstat not yet implemented")
+}
+func ext۰syscall۰Kill(fr *frame, args []value) value {
+ panic("syscall.Kill not yet implemented")
+}
+func ext۰syscall۰Lstat(fr *frame, args []value) value {
+ panic("syscall.Lstat not yet implemented")
+}
+func ext۰syscall۰Open(fr *frame, args []value) value {
+ panic("syscall.Open not yet implemented")
+}
+func ext۰syscall۰ParseDirent(fr *frame, args []value) value {
+ panic("syscall.ParseDirent not yet implemented")
+}
+func ext۰syscall۰Read(fr *frame, args []value) value {
+ panic("syscall.Read not yet implemented")
+}
+func ext۰syscall۰ReadDirent(fr *frame, args []value) value {
+ panic("syscall.ReadDirent not yet implemented")
+}
+func ext۰syscall۰Stat(fr *frame, args []value) value {
+ panic("syscall.Stat not yet implemented")
+}
+func ext۰syscall۰Write(fr *frame, args []value) value {
+ // func Write(fd int, p []byte) (n int, err error)
+ n, err := write(args[0].(int), valueToBytes(args[1]))
+ return tuple{n, wrapError(err)}
+}
+func ext۰syscall۰RawSyscall(fr *frame, args []value) value {
+ return tuple{^uintptr(0), uintptr(0), uintptr(0)}
+}
+
+func syswrite(fd int, b []byte) (int, error) {
+ return syscall.Write(fd, b)
+}
diff --git a/llgo/third_party/go.tools/go/ssa/interp/external_unix.go b/llgo/third_party/go.tools/go/ssa/interp/external_unix.go
new file mode 100644
index 0000000000000000000000000000000000000000..c482eabba52d7feee8f539655346a736ffd0d1c1
--- /dev/null
+++ b/llgo/third_party/go.tools/go/ssa/interp/external_unix.go
@@ -0,0 +1,132 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !windows,!plan9
+
+package interp
+
+import "syscall"
+
+func fillStat(st *syscall.Stat_t, stat structure) {
+ stat[0] = st.Dev
+ stat[1] = st.Ino
+ stat[2] = st.Nlink
+ stat[3] = st.Mode
+ stat[4] = st.Uid
+ stat[5] = st.Gid
+
+ stat[7] = st.Rdev
+ stat[8] = st.Size
+ stat[9] = st.Blksize
+ stat[10] = st.Blocks
+ // TODO(adonovan): fix: copy Timespecs.
+ // stat[11] = st.Atim
+ // stat[12] = st.Mtim
+ // stat[13] = st.Ctim
+}
+
+func ext۰syscall۰Close(fr *frame, args []value) value {
+ // func Close(fd int) (err error)
+ return wrapError(syscall.Close(args[0].(int)))
+}
+
+func ext۰syscall۰Fstat(fr *frame, args []value) value {
+ // func Fstat(fd int, stat *Stat_t) (err error)
+ fd := args[0].(int)
+ stat := (*args[1].(*value)).(structure)
+
+ var st syscall.Stat_t
+ err := syscall.Fstat(fd, &st)
+ fillStat(&st, stat)
+ return wrapError(err)
+}
+
+func ext۰syscall۰ReadDirent(fr *frame, args []value) value {
+ // func ReadDirent(fd int, buf []byte) (n int, err error)
+ fd := args[0].(int)
+ p := args[1].([]value)
+ b := make([]byte, len(p))
+ n, err := syscall.ReadDirent(fd, b)
+ for i := 0; i < n; i++ {
+ p[i] = b[i]
+ }
+ return tuple{n, wrapError(err)}
+}
+
+func ext۰syscall۰Kill(fr *frame, args []value) value {
+ // func Kill(pid int, sig Signal) (err error)
+ return wrapError(syscall.Kill(args[0].(int), syscall.Signal(args[1].(int))))
+}
+
+func ext۰syscall۰Lstat(fr *frame, args []value) value {
+ // func Lstat(name string, stat *Stat_t) (err error)
+ name := args[0].(string)
+ stat := (*args[1].(*value)).(structure)
+
+ var st syscall.Stat_t
+ err := syscall.Lstat(name, &st)
+ fillStat(&st, stat)
+ return wrapError(err)
+}
+
+func ext۰syscall۰Open(fr *frame, args []value) value {
+ // func Open(path string, mode int, perm uint32) (fd int, err error) {
+ path := args[0].(string)
+ mode := args[1].(int)
+ perm := args[2].(uint32)
+ fd, err := syscall.Open(path, mode, perm)
+ return tuple{fd, wrapError(err)}
+}
+
+func ext۰syscall۰ParseDirent(fr *frame, args []value) value {
+ // func ParseDirent(buf []byte, max int, names []string) (consumed int, count int, newnames []string)
+ max := args[1].(int)
+ var names []string
+ for _, iname := range args[2].([]value) {
+ names = append(names, iname.(string))
+ }
+ consumed, count, newnames := syscall.ParseDirent(valueToBytes(args[0]), max, names)
+ var inewnames []value
+ for _, newname := range newnames {
+ inewnames = append(inewnames, newname)
+ }
+ return tuple{consumed, count, inewnames}
+}
+
+func ext۰syscall۰Read(fr *frame, args []value) value {
+ // func Read(fd int, p []byte) (n int, err error)
+ fd := args[0].(int)
+ p := args[1].([]value)
+ b := make([]byte, len(p))
+ n, err := syscall.Read(fd, b)
+ for i := 0; i < n; i++ {
+ p[i] = b[i]
+ }
+ return tuple{n, wrapError(err)}
+}
+
+func ext۰syscall۰Stat(fr *frame, args []value) value {
+ // func Stat(name string, stat *Stat_t) (err error)
+ name := args[0].(string)
+ stat := (*args[1].(*value)).(structure)
+
+ var st syscall.Stat_t
+ err := syscall.Stat(name, &st)
+ fillStat(&st, stat)
+ return wrapError(err)
+}
+
+func ext۰syscall۰Write(fr *frame, args []value) value {
+ // func Write(fd int, p []byte) (n int, err error)
+ n, err := write(args[0].(int), valueToBytes(args[1]))
+ return tuple{n, wrapError(err)}
+}
+
+func ext۰syscall۰RawSyscall(fr *frame, args []value) value {
+ return tuple{uintptr(0), uintptr(0), uintptr(syscall.ENOSYS)}
+}
+
+func syswrite(fd int, b []byte) (int, error) {
+ return syscall.Write(fd, b)
+}
diff --git a/llgo/third_party/go.tools/go/ssa/interp/external_windows.go b/llgo/third_party/go.tools/go/ssa/interp/external_windows.go
new file mode 100644
index 0000000000000000000000000000000000000000..ef28a37138b18cb1a675edd4f795911b323d60e1
--- /dev/null
+++ b/llgo/third_party/go.tools/go/ssa/interp/external_windows.go
@@ -0,0 +1,44 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package interp
+
+import "syscall"
+
+func ext۰syscall۰Close(fr *frame, args []value) value {
+ panic("syscall.Close not yet implemented")
+}
+func ext۰syscall۰Fstat(fr *frame, args []value) value {
+ panic("syscall.Fstat not yet implemented")
+}
+func ext۰syscall۰Kill(fr *frame, args []value) value {
+ panic("syscall.Kill not yet implemented")
+}
+func ext۰syscall۰Lstat(fr *frame, args []value) value {
+ panic("syscall.Lstat not yet implemented")
+}
+func ext۰syscall۰Open(fr *frame, args []value) value {
+ panic("syscall.Open not yet implemented")
+}
+func ext۰syscall۰ParseDirent(fr *frame, args []value) value {
+ panic("syscall.ParseDirent not yet implemented")
+}
+func ext۰syscall۰Read(fr *frame, args []value) value {
+ panic("syscall.Read not yet implemented")
+}
+func ext۰syscall۰ReadDirent(fr *frame, args []value) value {
+ panic("syscall.ReadDirent not yet implemented")
+}
+func ext۰syscall۰Stat(fr *frame, args []value) value {
+ panic("syscall.Stat not yet implemented")
+}
+func ext۰syscall۰Write(fr *frame, args []value) value {
+ panic("syscall.Write not yet implemented")
+}
+func ext۰syscall۰RawSyscall(fr *frame, args []value) value {
+ return tuple{uintptr(0), uintptr(0), uintptr(syscall.ENOSYS)}
+}
+func syswrite(fd int, b []byte) (int, error) {
+ panic("syswrite not yet implemented")
+}
diff --git a/llgo/third_party/go.tools/go/ssa/interp/interp.go b/llgo/third_party/go.tools/go/ssa/interp/interp.go
new file mode 100644
index 0000000000000000000000000000000000000000..773e7ee1bbbf87c8dbc646c468475fcac3dea26b
--- /dev/null
+++ b/llgo/third_party/go.tools/go/ssa/interp/interp.go
@@ -0,0 +1,744 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package ssa/interp defines an interpreter for the SSA
+// representation of Go programs.
+//
+// This interpreter is provided as an adjunct for testing the SSA
+// construction algorithm. Its purpose is to provide a minimal
+// metacircular implementation of the dynamic semantics of each SSA
+// instruction. It is not, and will never be, a production-quality Go
+// interpreter.
+//
+// The following is a partial list of Go features that are currently
+// unsupported or incomplete in the interpreter.
+//
+// * Unsafe operations, including all uses of unsafe.Pointer, are
+// impossible to support given the "boxed" value representation we
+// have chosen.
+//
+// * The reflect package is only partially implemented.
+//
+// * "sync/atomic" operations are not currently atomic due to the
+// "boxed" value representation: it is not possible to read, modify
+// and write an interface value atomically. As a consequence, Mutexes
+// are currently broken. TODO(adonovan): provide a metacircular
+// implementation of Mutex avoiding the broken atomic primitives.
+//
+// * recover is only partially implemented. Also, the interpreter
+// makes no attempt to distinguish target panics from interpreter
+// crashes.
+//
+// * map iteration is asymptotically inefficient.
+//
+// * the sizes of the int, uint and uintptr types in the target
+// program are assumed to be the same as those of the interpreter
+// itself.
+//
+// * all values occupy space, even those of types defined by the spec
+// to have zero size, e.g. struct{}. This can cause asymptotic
+// performance degradation.
+//
+// * os.Exit is implemented using panic, causing deferred functions to
+// run.
+package interp
+
+import (
+ "fmt"
+ "go/token"
+ "os"
+ "reflect"
+ "runtime"
+
+ "llvm.org/llgo/third_party/go.tools/go/ssa"
+ "llvm.org/llgo/third_party/go.tools/go/types"
+)
+
+type continuation int
+
+const (
+ kNext continuation = iota
+ kReturn
+ kJump
+)
+
+// Mode is a bitmask of options affecting the interpreter.
+type Mode uint
+
+const (
+ DisableRecover Mode = 1 << iota // Disable recover() in target programs; show interpreter crash instead.
+ EnableTracing // Print a trace of all instructions as they are interpreted.
+)
+
+type methodSet map[string]*ssa.Function
+
+// State shared between all interpreted goroutines.
+type interpreter struct {
+ osArgs []value // the value of os.Args
+ prog *ssa.Program // the SSA program
+ globals map[ssa.Value]*value // addresses of global variables (immutable)
+ mode Mode // interpreter options
+ reflectPackage *ssa.Package // the fake reflect package
+ errorMethods methodSet // the method set of reflect.error, which implements the error interface.
+ rtypeMethods methodSet // the method set of rtype, which implements the reflect.Type interface.
+ runtimeErrorString types.Type // the runtime.errorString type
+ sizes types.Sizes // the effective type-sizing function
+}
+
+type deferred struct {
+ fn value
+ args []value
+ instr *ssa.Defer
+ tail *deferred
+}
+
+type frame struct {
+ i *interpreter
+ caller *frame
+ fn *ssa.Function
+ block, prevBlock *ssa.BasicBlock
+ env map[ssa.Value]value // dynamic values of SSA variables
+ locals []value
+ defers *deferred
+ result value
+ panicking bool
+ panic interface{}
+}
+
+func (fr *frame) get(key ssa.Value) value {
+ switch key := key.(type) {
+ case nil:
+ // Hack; simplifies handling of optional attributes
+ // such as ssa.Slice.{Low,High}.
+ return nil
+ case *ssa.Function, *ssa.Builtin:
+ return key
+ case *ssa.Const:
+ return constValue(key)
+ case *ssa.Global:
+ if r, ok := fr.i.globals[key]; ok {
+ return r
+ }
+ }
+ if r, ok := fr.env[key]; ok {
+ return r
+ }
+ panic(fmt.Sprintf("get: no value for %T: %v", key, key.Name()))
+}
+
+// runDefer runs a deferred call d.
+// It always returns normally, but may set or clear fr.panic.
+//
+func (fr *frame) runDefer(d *deferred) {
+ if fr.i.mode&EnableTracing != 0 {
+ fmt.Fprintf(os.Stderr, "%s: invoking deferred function call\n",
+ fr.i.prog.Fset.Position(d.instr.Pos()))
+ }
+ var ok bool
+ defer func() {
+ if !ok {
+ // Deferred call created a new state of panic.
+ fr.panicking = true
+ fr.panic = recover()
+ }
+ }()
+ call(fr.i, fr, d.instr.Pos(), d.fn, d.args)
+ ok = true
+}
+
+// runDefers executes fr's deferred function calls in LIFO order.
+//
+// On entry, fr.panicking indicates a state of panic; if
+// true, fr.panic contains the panic value.
+//
+// On completion, if a deferred call started a panic, or if no
+// deferred call recovered from a previous state of panic, then
+// runDefers itself panics after the last deferred call has run.
+//
+// If there was no initial state of panic, or it was recovered from,
+// runDefers returns normally.
+//
+func (fr *frame) runDefers() {
+ for d := fr.defers; d != nil; d = d.tail {
+ fr.runDefer(d)
+ }
+ fr.defers = nil
+ if fr.panicking {
+ panic(fr.panic) // new panic, or still panicking
+ }
+}
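+
+// As an illustrative sketch (not part of the interpreter), given target
+// code such as
+//
+//	defer f()
+//	defer g()
+//	panic("boom")
+//
+// fr.defers holds g, then f (each new deferred call becomes the head of
+// the list), so runDefers runs g before f; if neither call recovers,
+// runDefers re-panics with the original "boom" value.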
+
+// lookupMethod returns the method set for type typ, which may be one
+// of the interpreter's fake types.
+func lookupMethod(i *interpreter, typ types.Type, meth *types.Func) *ssa.Function {
+ switch typ {
+ case rtypeType:
+ return i.rtypeMethods[meth.Id()]
+ case errorType:
+ return i.errorMethods[meth.Id()]
+ }
+ return i.prog.LookupMethod(typ, meth.Pkg(), meth.Name())
+}
+
+// visitInstr interprets a single ssa.Instruction within the activation
+// record frame. It returns a continuation value indicating where to
+// read the next instruction from.
+func visitInstr(fr *frame, instr ssa.Instruction) continuation {
+ switch instr := instr.(type) {
+ case *ssa.DebugRef:
+ // no-op
+
+ case *ssa.UnOp:
+ fr.env[instr] = unop(instr, fr.get(instr.X))
+
+ case *ssa.BinOp:
+ fr.env[instr] = binop(instr.Op, instr.X.Type(), fr.get(instr.X), fr.get(instr.Y))
+
+ case *ssa.Call:
+ fn, args := prepareCall(fr, &instr.Call)
+ fr.env[instr] = call(fr.i, fr, instr.Pos(), fn, args)
+
+ case *ssa.ChangeInterface:
+ fr.env[instr] = fr.get(instr.X)
+
+ case *ssa.ChangeType:
+ fr.env[instr] = fr.get(instr.X) // (can't fail)
+
+ case *ssa.Convert:
+ fr.env[instr] = conv(instr.Type(), instr.X.Type(), fr.get(instr.X))
+
+ case *ssa.MakeInterface:
+ fr.env[instr] = iface{t: instr.X.Type(), v: fr.get(instr.X)}
+
+ case *ssa.Extract:
+ fr.env[instr] = fr.get(instr.Tuple).(tuple)[instr.Index]
+
+ case *ssa.Slice:
+ fr.env[instr] = slice(fr.get(instr.X), fr.get(instr.Low), fr.get(instr.High), fr.get(instr.Max))
+
+ case *ssa.Return:
+ switch len(instr.Results) {
+ case 0:
+ case 1:
+ fr.result = fr.get(instr.Results[0])
+ default:
+ var res []value
+ for _, r := range instr.Results {
+ res = append(res, fr.get(r))
+ }
+ fr.result = tuple(res)
+ }
+ fr.block = nil
+ return kReturn
+
+ case *ssa.RunDefers:
+ fr.runDefers()
+
+ case *ssa.Panic:
+ panic(targetPanic{fr.get(instr.X)})
+
+ case *ssa.Send:
+ fr.get(instr.Chan).(chan value) <- copyVal(fr.get(instr.X))
+
+ case *ssa.Store:
+ *fr.get(instr.Addr).(*value) = copyVal(fr.get(instr.Val))
+
+ case *ssa.If:
+ succ := 1
+ if fr.get(instr.Cond).(bool) {
+ succ = 0
+ }
+ fr.prevBlock, fr.block = fr.block, fr.block.Succs[succ]
+ return kJump
+
+ case *ssa.Jump:
+ fr.prevBlock, fr.block = fr.block, fr.block.Succs[0]
+ return kJump
+
+ case *ssa.Defer:
+ fn, args := prepareCall(fr, &instr.Call)
+ fr.defers = &deferred{
+ fn: fn,
+ args: args,
+ instr: instr,
+ tail: fr.defers,
+ }
+
+ case *ssa.Go:
+ fn, args := prepareCall(fr, &instr.Call)
+ go call(fr.i, nil, instr.Pos(), fn, args)
+
+ case *ssa.MakeChan:
+ fr.env[instr] = make(chan value, asInt(fr.get(instr.Size)))
+
+ case *ssa.Alloc:
+ var addr *value
+ if instr.Heap {
+ // new
+ addr = new(value)
+ fr.env[instr] = addr
+ } else {
+ // local
+ addr = fr.env[instr].(*value)
+ }
+ *addr = zero(deref(instr.Type()))
+
+ case *ssa.MakeSlice:
+ slice := make([]value, asInt(fr.get(instr.Cap)))
+ tElt := instr.Type().Underlying().(*types.Slice).Elem()
+ for i := range slice {
+ slice[i] = zero(tElt)
+ }
+ fr.env[instr] = slice[:asInt(fr.get(instr.Len))]
+
+ case *ssa.MakeMap:
+ reserve := 0
+ if instr.Reserve != nil {
+ reserve = asInt(fr.get(instr.Reserve))
+ }
+ fr.env[instr] = makeMap(instr.Type().Underlying().(*types.Map).Key(), reserve)
+
+ case *ssa.Range:
+ fr.env[instr] = rangeIter(fr.get(instr.X), instr.X.Type())
+
+ case *ssa.Next:
+ fr.env[instr] = fr.get(instr.Iter).(iter).next()
+
+ case *ssa.FieldAddr:
+ x := fr.get(instr.X)
+ fr.env[instr] = &(*x.(*value)).(structure)[instr.Field]
+
+ case *ssa.Field:
+ fr.env[instr] = copyVal(fr.get(instr.X).(structure)[instr.Field])
+
+ case *ssa.IndexAddr:
+ x := fr.get(instr.X)
+ idx := fr.get(instr.Index)
+ switch x := x.(type) {
+ case []value:
+ fr.env[instr] = &x[asInt(idx)]
+ case *value: // *array
+ fr.env[instr] = &(*x).(array)[asInt(idx)]
+ default:
+ panic(fmt.Sprintf("unexpected x type in IndexAddr: %T", x))
+ }
+
+ case *ssa.Index:
+ fr.env[instr] = copyVal(fr.get(instr.X).(array)[asInt(fr.get(instr.Index))])
+
+ case *ssa.Lookup:
+ fr.env[instr] = lookup(instr, fr.get(instr.X), fr.get(instr.Index))
+
+ case *ssa.MapUpdate:
+ m := fr.get(instr.Map)
+ key := fr.get(instr.Key)
+ v := fr.get(instr.Value)
+ switch m := m.(type) {
+ case map[value]value:
+ m[key] = v
+ case *hashmap:
+ m.insert(key.(hashable), v)
+ default:
+ panic(fmt.Sprintf("illegal map type: %T", m))
+ }
+
+ case *ssa.TypeAssert:
+ fr.env[instr] = typeAssert(fr.i, instr, fr.get(instr.X).(iface))
+
+ case *ssa.MakeClosure:
+ var bindings []value
+ for _, binding := range instr.Bindings {
+ bindings = append(bindings, fr.get(binding))
+ }
+ fr.env[instr] = &closure{instr.Fn.(*ssa.Function), bindings}
+
+ case *ssa.Phi:
+ for i, pred := range instr.Block().Preds {
+ if fr.prevBlock == pred {
+ fr.env[instr] = fr.get(instr.Edges[i])
+ break
+ }
+ }
+
+ case *ssa.Select:
+ var cases []reflect.SelectCase
+ if !instr.Blocking {
+ cases = append(cases, reflect.SelectCase{
+ Dir: reflect.SelectDefault,
+ })
+ }
+ for _, state := range instr.States {
+ var dir reflect.SelectDir
+ if state.Dir == types.RecvOnly {
+ dir = reflect.SelectRecv
+ } else {
+ dir = reflect.SelectSend
+ }
+ var send reflect.Value
+ if state.Send != nil {
+ send = reflect.ValueOf(fr.get(state.Send))
+ }
+ cases = append(cases, reflect.SelectCase{
+ Dir: dir,
+ Chan: reflect.ValueOf(fr.get(state.Chan)),
+ Send: send,
+ })
+ }
+ chosen, recv, recvOk := reflect.Select(cases)
+ if !instr.Blocking {
+ chosen-- // default case should have index -1.
+ }
+ r := tuple{chosen, recvOk}
+ for i, st := range instr.States {
+ if st.Dir == types.RecvOnly {
+ var v value
+ if i == chosen && recvOk {
+ // No need to copy since send makes an unaliased copy.
+ v = recv.Interface().(value)
+ } else {
+ v = zero(st.Chan.Type().Underlying().(*types.Chan).Elem())
+ }
+ r = append(r, v)
+ }
+ }
+ fr.env[instr] = r
+
+ default:
+ panic(fmt.Sprintf("unexpected instruction: %T", instr))
+ }
+
+ // if val, ok := instr.(ssa.Value); ok {
+ // fmt.Println(toString(fr.env[val])) // debugging
+ // }
+
+ return kNext
+}
+
+// prepareCall determines the function value and argument values for a
+// function call in a Call, Go or Defer instruction, performing
+// interface method lookup if needed.
+//
+func prepareCall(fr *frame, call *ssa.CallCommon) (fn value, args []value) {
+ v := fr.get(call.Value)
+ if call.Method == nil {
+ // Function call.
+ fn = v
+ } else {
+ // Interface method invocation.
+ recv := v.(iface)
+ if recv.t == nil {
+ panic("method invoked on nil interface")
+ }
+ if f := lookupMethod(fr.i, recv.t, call.Method); f == nil {
+ // Unreachable in well-typed programs.
+ panic(fmt.Sprintf("method set for dynamic type %v does not contain %s", recv.t, call.Method))
+ } else {
+ fn = f
+ }
+ args = append(args, copyVal(recv.v))
+ }
+ for _, arg := range call.Args {
+ args = append(args, fr.get(arg))
+ }
+ return
+}
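+
+// For example (sketch): for an ordinary call f(a, b), call.Method is nil
+// and fn is simply the value of f; for an interface call x.M(a),
+// call.Method is non-nil, so the concrete method is found by lookupMethod
+// on x's dynamic type and a copy of the receiver value is prepended to args.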
+
+// call interprets a call to a function (function, builtin or closure)
+// fn with arguments args, returning its result.
+// callpos is the position of the callsite.
+//
+func call(i *interpreter, caller *frame, callpos token.Pos, fn value, args []value) value {
+ switch fn := fn.(type) {
+ case *ssa.Function:
+ if fn == nil {
+ panic("call of nil function") // nil of func type
+ }
+ return callSSA(i, caller, callpos, fn, args, nil)
+ case *closure:
+ return callSSA(i, caller, callpos, fn.Fn, args, fn.Env)
+ case *ssa.Builtin:
+ return callBuiltin(caller, callpos, fn, args)
+ }
+ panic(fmt.Sprintf("cannot call %T", fn))
+}
+
+func loc(fset *token.FileSet, pos token.Pos) string {
+ if pos == token.NoPos {
+ return ""
+ }
+ return " at " + fset.Position(pos).String()
+}
+
+// callSSA interprets a call to function fn with arguments args,
+// and lexical environment env, returning its result.
+// callpos is the position of the callsite.
+//
+func callSSA(i *interpreter, caller *frame, callpos token.Pos, fn *ssa.Function, args []value, env []value) value {
+ if i.mode&EnableTracing != 0 {
+ fset := fn.Prog.Fset
+ // TODO(adonovan): fix: loc() lies for external functions.
+ fmt.Fprintf(os.Stderr, "Entering %s%s.\n", fn, loc(fset, fn.Pos()))
+ suffix := ""
+ if caller != nil {
+ suffix = ", resuming " + caller.fn.String() + loc(fset, callpos)
+ }
+ defer fmt.Fprintf(os.Stderr, "Leaving %s%s.\n", fn, suffix)
+ }
+ fr := &frame{
+ i: i,
+ caller: caller, // for panic/recover
+ fn: fn,
+ }
+ if fn.Parent() == nil {
+ name := fn.String()
+ if ext := externals[name]; ext != nil {
+ if i.mode&EnableTracing != 0 {
+ fmt.Fprintln(os.Stderr, "\t(external)")
+ }
+ return ext(fr, args)
+ }
+ if fn.Blocks == nil {
+ panic("no code for function: " + name)
+ }
+ }
+ fr.env = make(map[ssa.Value]value)
+ fr.block = fn.Blocks[0]
+ fr.locals = make([]value, len(fn.Locals))
+ for i, l := range fn.Locals {
+ fr.locals[i] = zero(deref(l.Type()))
+ fr.env[l] = &fr.locals[i]
+ }
+ for i, p := range fn.Params {
+ fr.env[p] = args[i]
+ }
+ for i, fv := range fn.FreeVars {
+ fr.env[fv] = env[i]
+ }
+ for fr.block != nil {
+ runFrame(fr)
+ }
+ // Destroy the locals to avoid accidental use after return.
+ for i := range fn.Locals {
+ fr.locals[i] = bad{}
+ }
+ return fr.result
+}
+
+// runFrame executes SSA instructions starting at fr.block and
+// continuing until a return, a panic, or a recovered panic.
+//
+// After a panic, runFrame panics.
+//
+// After a normal return, fr.result contains the result of the call
+// and fr.block is nil.
+//
+// A recovered panic in a function without named return parameters
+// (NRPs) becomes a normal return of the zero value of the function's
+// result type.
+//
+// After a recovered panic in a function with NRPs, fr.result is
+// undefined and fr.block contains the block at which to resume
+// control.
+//
+func runFrame(fr *frame) {
+ defer func() {
+ if fr.block == nil {
+ return // normal return
+ }
+ if fr.i.mode&DisableRecover != 0 {
+ return // let interpreter crash
+ }
+ fr.panicking = true
+ fr.panic = recover()
+ if fr.i.mode&EnableTracing != 0 {
+ fmt.Fprintf(os.Stderr, "Panicking: %T %v.\n", fr.panic, fr.panic)
+ }
+ fr.runDefers()
+ fr.block = fr.fn.Recover
+ }()
+
+ for {
+ if fr.i.mode&EnableTracing != 0 {
+ fmt.Fprintf(os.Stderr, ".%s:\n", fr.block)
+ }
+ block:
+ for _, instr := range fr.block.Instrs {
+ if fr.i.mode&EnableTracing != 0 {
+ if v, ok := instr.(ssa.Value); ok {
+ fmt.Fprintln(os.Stderr, "\t", v.Name(), "=", instr)
+ } else {
+ fmt.Fprintln(os.Stderr, "\t", instr)
+ }
+ }
+ switch visitInstr(fr, instr) {
+ case kReturn:
+ return
+ case kNext:
+ // no-op
+ case kJump:
+ break block
+ }
+ }
+ }
+}
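+
+// Illustrative sketch: for target code such as
+//
+//	func f() (x int) {
+//		defer func() { recover(); x = 1 }()
+//		panic("p")
+//	}
+//
+// the deferred call recovers, so the handler above sets fr.block to
+// fn.Recover and callSSA resumes there; because f has named results,
+// the call yields x == 1 rather than the zero value.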
+
+// doRecover implements the recover() built-in.
+func doRecover(caller *frame) value {
+ // recover() must be exactly one level beneath the deferred
+ // function (two levels beneath the panicking function) to
+ // have any effect. Thus we ignore both "defer recover()" and
+ // "defer f() -> g() -> recover()".
+ if caller.i.mode&DisableRecover == 0 &&
+ caller != nil && !caller.panicking &&
+ caller.caller != nil && caller.caller.panicking {
+ caller.caller.panicking = false
+ p := caller.caller.panic
+ caller.caller.panic = nil
+ switch p := p.(type) {
+ case targetPanic:
+ // The target program explicitly called panic().
+ return p.v
+ case runtime.Error:
+ // The interpreter encountered a runtime error.
+ return iface{caller.i.runtimeErrorString, p.Error()}
+ case string:
+ // The interpreter explicitly called panic().
+ return iface{caller.i.runtimeErrorString, p}
+ default:
+ panic(fmt.Sprintf("unexpected panic type %T in target call to recover()", p))
+ }
+ }
+ return iface{}
+}
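+
+// For example (sketch of target code):
+//
+//	defer recover()              // no effect: recover is itself the deferred call
+//	defer func() { recover() }() // effective: the deferred function calls recover directly
+//
+// which is exactly the caller/caller.caller relationship tested above.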
+
+// setGlobal sets the value of a system-initialized global variable.
+func setGlobal(i *interpreter, pkg *ssa.Package, name string, v value) {
+ if g, ok := i.globals[pkg.Var(name)]; ok {
+ *g = v
+ return
+ }
+ panic("no global variable: " + pkg.Object.Path() + "." + name)
+}
+
+var environ []value
+
+func init() {
+ for _, s := range os.Environ() {
+ environ = append(environ, s)
+ }
+ environ = append(environ, "GOSSAINTERP=1")
+ environ = append(environ, "GOARCH="+runtime.GOARCH)
+}
+
+// Interpret interprets the Go program whose main package is mainpkg.
+// mode specifies various interpreter options. filename and args are
+// the initial values of os.Args for the target program. sizes is the
+// effective type-sizing function for this program.
+//
+// Interpret returns the exit code of the program: 2 for panic (like
+// gc does), or the argument to os.Exit for normal termination.
+//
+// The SSA program must include the "runtime" package.
+//
+func Interpret(mainpkg *ssa.Package, mode Mode, sizes types.Sizes, filename string, args []string) (exitCode int) {
+ i := &interpreter{
+ prog: mainpkg.Prog,
+ globals: make(map[ssa.Value]*value),
+ mode: mode,
+ sizes: sizes,
+ }
+ runtimePkg := i.prog.ImportedPackage("runtime")
+ if runtimePkg == nil {
+ panic("ssa.Program doesn't include runtime package")
+ }
+ i.runtimeErrorString = runtimePkg.Type("errorString").Object().Type()
+
+ initReflect(i)
+
+ i.osArgs = append(i.osArgs, filename)
+ for _, arg := range args {
+ i.osArgs = append(i.osArgs, arg)
+ }
+
+ for _, pkg := range i.prog.AllPackages() {
+ // Initialize global storage.
+ for _, m := range pkg.Members {
+ switch v := m.(type) {
+ case *ssa.Global:
+ cell := zero(deref(v.Type()))
+ i.globals[v] = &cell
+ }
+ }
+
+ // Ad-hoc initialization for magic system variables.
+ switch pkg.Object.Path() {
+ case "syscall":
+ setGlobal(i, pkg, "envs", environ)
+
+ case "runtime":
+ sz := sizes.Sizeof(pkg.Object.Scope().Lookup("MemStats").Type())
+ setGlobal(i, pkg, "sizeof_C_MStats", uintptr(sz))
+
+ // Delete the bodies of almost all "runtime" functions since they're magic.
+ // A missing intrinsic leads to a very clear error.
+ for _, mem := range pkg.Members {
+ if fn, ok := mem.(*ssa.Function); ok {
+ switch fn.Name() {
+ case "GOROOT", "gogetenv":
+ // keep
+ default:
+ fn.Blocks = nil
+ }
+ }
+ }
+ }
+ }
+
+ // Top-level error handler.
+ exitCode = 2
+ defer func() {
+ if exitCode != 2 || i.mode&DisableRecover != 0 {
+ return
+ }
+ switch p := recover().(type) {
+ case exitPanic:
+ exitCode = int(p)
+ return
+ case targetPanic:
+ fmt.Fprintln(os.Stderr, "panic:", toString(p.v))
+ case runtime.Error:
+ fmt.Fprintln(os.Stderr, "panic:", p.Error())
+ case string:
+ fmt.Fprintln(os.Stderr, "panic:", p)
+ default:
+ fmt.Fprintf(os.Stderr, "panic: unexpected type: %T\n", p)
+ }
+
+ // TODO(adonovan): dump panicking interpreter goroutine?
+ // buf := make([]byte, 0x10000)
+ // runtime.Stack(buf, false)
+ // fmt.Fprintln(os.Stderr, string(buf))
+ // (Or dump panicking target goroutine?)
+ }()
+
+ // Run!
+ call(i, nil, token.NoPos, mainpkg.Func("init"), nil)
+ if mainFn := mainpkg.Func("main"); mainFn != nil {
+ call(i, nil, token.NoPos, mainFn, nil)
+ exitCode = 0
+ } else {
+ fmt.Fprintln(os.Stderr, "No main function.")
+ exitCode = 1
+ }
+ return
+}
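+
+// A minimal usage sketch (mirroring interp_test.go; the file name is
+// illustrative and error handling is elided):
+//
+//	conf := loader.Config{SourceImports: true}
+//	conf.CreateFromFilenames("main", "hello.go")
+//	conf.Import("runtime")
+//	iprog, _ := conf.Load()
+//	prog := ssa.Create(iprog, ssa.SanityCheckFunctions)
+//	prog.BuildAll()
+//	mainPkg := prog.Package(iprog.Created[0].Pkg)
+//	code := Interpret(mainPkg, 0, &types.StdSizes{8, 8}, "hello.go", nil)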
+
+// deref returns a pointer's element type; otherwise it returns typ.
+// TODO(adonovan): Import from ssa?
+func deref(typ types.Type) types.Type {
+ if p, ok := typ.Underlying().(*types.Pointer); ok {
+ return p.Elem()
+ }
+ return typ
+}
diff --git a/llgo/third_party/go.tools/go/ssa/interp/interp_test.go b/llgo/third_party/go.tools/go/ssa/interp/interp_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..7c3080783e95bfaf9d4db448a575e66c66f70135
--- /dev/null
+++ b/llgo/third_party/go.tools/go/ssa/interp/interp_test.go
@@ -0,0 +1,365 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !windows,!plan9
+
+package interp_test
+
+import (
+ "bytes"
+ "fmt"
+ "go/build"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+ "time"
+
+ "llvm.org/llgo/third_party/go.tools/go/loader"
+ "llvm.org/llgo/third_party/go.tools/go/ssa"
+ "llvm.org/llgo/third_party/go.tools/go/ssa/interp"
+ "llvm.org/llgo/third_party/go.tools/go/types"
+)
+
+// Each line contains a space-separated list of $GOROOT/test/
+// filenames comprising the main package of a program.
+// They are ordered quickest-first, roughly.
+//
+// TODO(adonovan): integrate into the $GOROOT/test driver scripts,
+// golden file checking, etc.
+var gorootTestTests = []string{
+ "235.go",
+ "alias1.go",
+ "chancap.go",
+ "func5.go",
+ "func6.go",
+ "func7.go",
+ "func8.go",
+ "helloworld.go",
+ "varinit.go",
+ "escape3.go",
+ "initcomma.go",
+ "cmp.go",
+ "compos.go",
+ "turing.go",
+ "indirect.go",
+ "complit.go",
+ "for.go",
+ "struct0.go",
+ "intcvt.go",
+ "printbig.go",
+ "deferprint.go",
+ "escape.go",
+ "range.go",
+ "const4.go",
+ "float_lit.go",
+ "bigalg.go",
+ "decl.go",
+ "if.go",
+ "named.go",
+ "bigmap.go",
+ "func.go",
+ "reorder2.go",
+ "closure.go",
+ "gc.go",
+ "simassign.go",
+ "iota.go",
+ "nilptr2.go",
+ "goprint.go", // doesn't actually assert anything (cmpout)
+ "utf.go",
+ "method.go",
+ "char_lit.go",
+ "env.go",
+ "int_lit.go",
+ "string_lit.go",
+ "defer.go",
+ "typeswitch.go",
+ "stringrange.go",
+ "reorder.go",
+ "method3.go",
+ "literal.go",
+ "nul1.go", // doesn't actually assert anything (errorcheckoutput)
+ "zerodivide.go",
+ "convert.go",
+ "convT2X.go",
+ "switch.go",
+ "initialize.go",
+ "ddd.go",
+ "blank.go", // partly disabled
+ "map.go",
+ "closedchan.go",
+ "divide.go",
+ "rename.go",
+ "const3.go",
+ "nil.go",
+ "recover.go", // reflection parts disabled
+ "recover1.go",
+ "recover2.go",
+ "recover3.go",
+ "typeswitch1.go",
+ "floatcmp.go",
+ "crlf.go", // doesn't actually assert anything (runoutput)
+ // Slow tests follow.
+ "bom.go", // ~1.7s
+ "gc1.go", // ~1.7s
+ "cmplxdivide.go cmplxdivide1.go", // ~2.4s
+
+ // Working, but not worth enabling:
+ // "append.go", // works, but slow (15s).
+ // "gc2.go", // works, but slow, and cheats on the memory check.
+ // "sigchld.go", // works, but only on POSIX.
+ // "peano.go", // works only up to n=9, and slow even then.
+ // "stack.go", // works, but too slow (~30s) by default.
+ // "solitaire.go", // works, but too slow (~30s).
+ // "const.go", // works but for but one bug: constant folder doesn't consider representations.
+ // "init1.go", // too slow (80s) and not that interesting. Cheats on ReadMemStats check too.
+ // "rotate.go rotate0.go", // emits source for a test
+ // "rotate.go rotate1.go", // emits source for a test
+ // "rotate.go rotate2.go", // emits source for a test
+ // "rotate.go rotate3.go", // emits source for a test
+ // "64bit.go", // emits source for a test
+ // "run.go", // test driver, not a test.
+
+ // Broken. TODO(adonovan): fix.
+ // copy.go // very slow; but with N=4 quickly crashes, slice index out of range.
+ // nilptr.go // interp: V > uintptr not implemented. Slow test, lots of mem
+ // args.go // works, but requires specific os.Args from the driver.
+ // index.go // a template, not a real test.
+ // mallocfin.go // SetFinalizer not implemented.
+
+ // TODO(adonovan): add tests from $GOROOT/test/* subtrees:
+ // bench chan bugs fixedbugs interface ken.
+}
+
+// These are files in go.tools/go/ssa/interp/testdata/.
+var testdataTests = []string{
+ "boundmeth.go",
+ "complit.go",
+ "coverage.go",
+ "defer.go",
+ "fieldprom.go",
+ "ifaceconv.go",
+ "ifaceprom.go",
+ "initorder.go",
+ "methprom.go",
+ "mrvchain.go",
+ "range.go",
+ "recover.go",
+ "static.go",
+ "callstack.go",
+}
+
+// These are files and packages in $GOROOT/src/.
+var gorootSrcTests = []string{
+ "encoding/ascii85",
+ "encoding/csv",
+ "encoding/hex",
+ "encoding/pem",
+ "hash/crc32",
+ // "testing", // TODO(adonovan): implement runtime.Goexit correctly
+ "text/scanner",
+ "unicode",
+
+ // Too slow:
+ // "container/ring",
+ // "hash/adler32",
+
+ // TODO(adonovan): packages with Examples require os.Pipe (unimplemented):
+ // "unicode/utf8",
+ // "log",
+ // "path",
+ // "flag",
+}
+
+type successPredicate func(exitcode int, output string) error
+
+func run(t *testing.T, dir, input string, success successPredicate) bool {
+ fmt.Printf("Input: %s\n", input)
+
+ start := time.Now()
+
+ var inputs []string
+ for _, i := range strings.Split(input, " ") {
+ if strings.HasSuffix(i, ".go") {
+ i = dir + i
+ }
+ inputs = append(inputs, i)
+ }
+
+ conf := loader.Config{SourceImports: true}
+ if _, err := conf.FromArgs(inputs, true); err != nil {
+ t.Errorf("FromArgs(%s) failed: %s", inputs, err)
+ return false
+ }
+
+ conf.Import("runtime")
+
+ // Print a helpful hint if we don't make it to the end.
+ var hint string
+ defer func() {
+ if hint != "" {
+ fmt.Println("FAIL")
+ fmt.Println(hint)
+ } else {
+ fmt.Println("PASS")
+ }
+
+ interp.CapturedOutput = nil
+ }()
+
+ hint = fmt.Sprintf("To dump SSA representation, run:\n%% go build golang.org/x/tools/cmd/ssadump && ./ssadump -build=CFP %s\n", input)
+
+ iprog, err := conf.Load()
+ if err != nil {
+ t.Errorf("conf.Load(%s) failed: %s", inputs, err)
+ return false
+ }
+
+ prog := ssa.Create(iprog, ssa.SanityCheckFunctions)
+ prog.BuildAll()
+
+ var mainPkg *ssa.Package
+ var initialPkgs []*ssa.Package
+ for _, info := range iprog.InitialPackages() {
+ if info.Pkg.Path() == "runtime" {
+ continue // not an initial package
+ }
+ p := prog.Package(info.Pkg)
+ initialPkgs = append(initialPkgs, p)
+ if mainPkg == nil && p.Func("main") != nil {
+ mainPkg = p
+ }
+ }
+ if mainPkg == nil {
+ testmainPkg := prog.CreateTestMainPackage(initialPkgs...)
+ if testmainPkg == nil {
+ t.Errorf("CreateTestMainPackage(%s) returned nil", mainPkg)
+ return false
+ }
+ if testmainPkg.Func("main") == nil {
+ t.Errorf("synthetic testmain package has no main")
+ return false
+ }
+ mainPkg = testmainPkg
+ }
+
+ var out bytes.Buffer
+ interp.CapturedOutput = &out
+
+ hint = fmt.Sprintf("To trace execution, run:\n%% go build golang.org/x/tools/cmd/ssadump && ./ssadump -build=C -run --interp=T %s\n", input)
+ exitCode := interp.Interpret(mainPkg, 0, &types.StdSizes{8, 8}, inputs[0], []string{})
+
+ // The definition of success varies with each file.
+ if err := success(exitCode, out.String()); err != nil {
+ t.Errorf("interp.Interpret(%s) failed: %s", inputs, err)
+ return false
+ }
+
+ hint = "" // call off the hounds
+
+ if false {
+ fmt.Println(input, time.Since(start)) // test profiling
+ }
+
+ return true
+}
+
+const slash = string(os.PathSeparator)
+
+func printFailures(failures []string) {
+ if failures != nil {
+ fmt.Println("The following tests failed:")
+ for _, f := range failures {
+ fmt.Printf("\t%s\n", f)
+ }
+ }
+}
+
+// The "normal" success predicate.
+func exitsZero(exitcode int, _ string) error {
+ if exitcode != 0 {
+ return fmt.Errorf("exit code was %d", exitcode)
+ }
+ return nil
+}
+
+// TestTestdataFiles runs the interpreter on testdata/*.go.
+func TestTestdataFiles(t *testing.T) {
+ var failures []string
+ for _, input := range testdataTests {
+ if !run(t, "testdata"+slash, input, exitsZero) {
+ failures = append(failures, input)
+ }
+ }
+ printFailures(failures)
+}
+
+// TestGorootTest runs the interpreter on $GOROOT/test/*.go.
+func TestGorootTest(t *testing.T) {
+ if testing.Short() {
+ return // too slow (~30s)
+ }
+
+ var failures []string
+
+ // $GOROOT/test programs are also considered failures if they print "BUG".
+ success := func(exitcode int, output string) error {
+ if exitcode != 0 {
+ return fmt.Errorf("exit code was %d", exitcode)
+ }
+ if strings.Contains(output, "BUG") {
+ return fmt.Errorf("exited zero but output contained 'BUG'")
+ }
+ return nil
+ }
+ for _, input := range gorootTestTests {
+ if !run(t, filepath.Join(build.Default.GOROOT, "test")+slash, input, success) {
+ failures = append(failures, input)
+ }
+ }
+ for _, input := range gorootSrcTests {
+ if !run(t, filepath.Join(build.Default.GOROOT, "src")+slash, input, success) {
+ failures = append(failures, input)
+ }
+ }
+ printFailures(failures)
+}
+
+// TestTestmainPackage runs the interpreter on a synthetic "testmain" package.
+func TestTestmainPackage(t *testing.T) {
+ success := func(exitcode int, output string) error {
+ if exitcode == 0 {
+ return fmt.Errorf("unexpected success")
+ }
+ if !strings.Contains(output, "FAIL: TestFoo") {
+ return fmt.Errorf("missing failure log for TestFoo")
+ }
+ if !strings.Contains(output, "FAIL: TestBar") {
+ return fmt.Errorf("missing failure log for TestBar")
+ }
+ // TODO(adonovan): test benchmarks too
+ return nil
+ }
+ run(t, "testdata"+slash, "a_test.go", success)
+}
+
+// CreateTestMainPackage should return nil if there were no tests.
+func TestNullTestmainPackage(t *testing.T) {
+ var conf loader.Config
+ if err := conf.CreateFromFilenames("", "testdata/b_test.go"); err != nil {
+ t.Fatalf("ParseFile failed: %s", err)
+ }
+ iprog, err := conf.Load()
+ if err != nil {
+ t.Fatalf("CreatePackages failed: %s", err)
+ }
+ prog := ssa.Create(iprog, ssa.SanityCheckFunctions)
+ mainPkg := prog.Package(iprog.Created[0].Pkg)
+ if mainPkg.Func("main") != nil {
+ t.Fatalf("unexpected main function")
+ }
+ if prog.CreateTestMainPackage(mainPkg) != nil {
+ t.Fatalf("CreateTestMainPackage returned non-nil")
+ }
+}
diff --git a/llgo/third_party/go.tools/go/ssa/interp/map.go b/llgo/third_party/go.tools/go/ssa/interp/map.go
new file mode 100644
index 0000000000000000000000000000000000000000..ba77f7267b86508272797c7caa86af9a3d7d5c6e
--- /dev/null
+++ b/llgo/third_party/go.tools/go/ssa/interp/map.go
@@ -0,0 +1,113 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package interp
+
+// Custom hashtable atop map.
+// For use when the key's equivalence relation is not consistent with ==.
+
+// The Go specification doesn't address the atomicity of map operations.
+// The FAQ states that an implementation is permitted to crash on
+// concurrent map access.
+
+import (
+ "llvm.org/llgo/third_party/go.tools/go/types"
+)
+
+type hashable interface {
+ hash(t types.Type) int
+ eq(t types.Type, x interface{}) bool
+}
+
+type entry struct {
+ key hashable
+ value value
+ next *entry
+}
+
+// A hashtable atop the built-in map. Since each bucket contains
+// exactly one hash value, there's no need to perform hash-equality
+// tests when walking the linked list. Rehashing is done by the
+// underlying map.
+type hashmap struct {
+ keyType types.Type
+ table map[int]*entry
+ length int // number of entries in map
+}
+
+// makeMap returns an empty initialized map of key type kt,
+// preallocating space for reserve elements.
+func makeMap(kt types.Type, reserve int) value {
+ if usesBuiltinMap(kt) {
+ return make(map[value]value, reserve)
+ }
+ return &hashmap{keyType: kt, table: make(map[int]*entry, reserve)}
+}
+
+// delete removes the association for key k, if any.
+func (m *hashmap) delete(k hashable) {
+ if m != nil {
+ hash := k.hash(m.keyType)
+ head := m.table[hash]
+ if head != nil {
+ if k.eq(m.keyType, head.key) {
+ m.table[hash] = head.next
+ m.length--
+ return
+ }
+ prev := head
+ for e := head.next; e != nil; e = e.next {
+ if k.eq(m.keyType, e.key) {
+ prev.next = e.next
+ m.length--
+ return
+ }
+ prev = e
+ }
+ }
+ }
+}
+
+// lookup returns the value associated with key k, if present, or
+// value(nil) otherwise.
+func (m *hashmap) lookup(k hashable) value {
+ if m != nil {
+ hash := k.hash(m.keyType)
+ for e := m.table[hash]; e != nil; e = e.next {
+ if k.eq(m.keyType, e.key) {
+ return e.value
+ }
+ }
+ }
+ return nil
+}
+
+// insert updates the map to associate key k with value v. If there
+// was already an association for an eq() (though not necessarily ==)
+// k, the previous key remains in the map and its associated value is
+// updated.
+func (m *hashmap) insert(k hashable, v value) {
+ hash := k.hash(m.keyType)
+ head := m.table[hash]
+ for e := head; e != nil; e = e.next {
+ if k.eq(m.keyType, e.key) {
+ e.value = v
+ return
+ }
+ }
+ m.table[hash] = &entry{
+ key: k,
+ value: v,
+ next: head,
+ }
+ m.length++
+}
+
+// len returns the number of key/value associations in the map.
+func (m *hashmap) len() int {
+ if m != nil {
+ return m.length
+ }
+ return 0
+}
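+
+// Illustrative sketch of how the interpreter uses this type: makeMap
+// consults usesBuiltinMap (defined elsewhere in this package) and falls
+// back to *hashmap for key types whose equivalence cannot be decided by
+// == on the boxed representation, such as struct-typed keys:
+//
+//	m := makeMap(keyType, 0)
+//	if hm, ok := m.(*hashmap); ok {
+//		hm.insert(k, v)   // k implements hashable
+//		_ = hm.lookup(k2) // found iff k2.eq(keyType, k)
+//	}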
diff --git a/llgo/third_party/go.tools/go/ssa/interp/ops.go b/llgo/third_party/go.tools/go/ssa/interp/ops.go
new file mode 100644
index 0000000000000000000000000000000000000000..fedae643eba0437e9bf644ef3b483acf70a732af
--- /dev/null
+++ b/llgo/third_party/go.tools/go/ssa/interp/ops.go
@@ -0,0 +1,1396 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package interp
+
+import (
+ "bytes"
+ "fmt"
+ "go/token"
+ "strings"
+ "sync"
+ "unsafe"
+
+ "llvm.org/llgo/third_party/go.tools/go/exact"
+ "llvm.org/llgo/third_party/go.tools/go/ssa"
+ "llvm.org/llgo/third_party/go.tools/go/types"
+)
+
+// If the target program panics, the interpreter panics with this type.
+type targetPanic struct {
+ v value
+}
+
+func (p targetPanic) String() string {
+ return toString(p.v)
+}
+
+// If the target program calls exit, the interpreter panics with this type.
+type exitPanic int
+
+// constValue returns the value of the constant with the
+// dynamic type tag appropriate for c.Type().
+func constValue(c *ssa.Const) value {
+ if c.IsNil() {
+ return zero(c.Type()) // typed nil
+ }
+
+ if t, ok := c.Type().Underlying().(*types.Basic); ok {
+ // TODO(adonovan): eliminate untyped constants from SSA form.
+ switch t.Kind() {
+ case types.Bool, types.UntypedBool:
+ return exact.BoolVal(c.Value)
+ case types.Int, types.UntypedInt:
+ // Assume sizeof(int) is same on host and target.
+ return int(c.Int64())
+ case types.Int8:
+ return int8(c.Int64())
+ case types.Int16:
+ return int16(c.Int64())
+ case types.Int32, types.UntypedRune:
+ return int32(c.Int64())
+ case types.Int64:
+ return c.Int64()
+ case types.Uint:
+ // Assume sizeof(uint) is same on host and target.
+ return uint(c.Uint64())
+ case types.Uint8:
+ return uint8(c.Uint64())
+ case types.Uint16:
+ return uint16(c.Uint64())
+ case types.Uint32:
+ return uint32(c.Uint64())
+ case types.Uint64:
+ return c.Uint64()
+ case types.Uintptr:
+ // Assume sizeof(uintptr) is same on host and target.
+ return uintptr(c.Uint64())
+ case types.Float32:
+ return float32(c.Float64())
+ case types.Float64, types.UntypedFloat:
+ return c.Float64()
+ case types.Complex64:
+ return complex64(c.Complex128())
+ case types.Complex128, types.UntypedComplex:
+ return c.Complex128()
+ case types.String, types.UntypedString:
+ if c.Value.Kind() == exact.String {
+ return exact.StringVal(c.Value)
+ }
+ return string(rune(c.Int64()))
+ }
+ }
+
+ panic(fmt.Sprintf("constValue: %s", c))
+}
+
+// asInt converts x, which must be an integer, to an int suitable for
+// use as a slice or array index or operand to make().
+func asInt(x value) int {
+ switch x := x.(type) {
+ case int:
+ return x
+ case int8:
+ return int(x)
+ case int16:
+ return int(x)
+ case int32:
+ return int(x)
+ case int64:
+ return int(x)
+ case uint:
+ return int(x)
+ case uint8:
+ return int(x)
+ case uint16:
+ return int(x)
+ case uint32:
+ return int(x)
+ case uint64:
+ return int(x)
+ case uintptr:
+ return int(x)
+ }
+ panic(fmt.Sprintf("cannot convert %T to int", x))
+}
+
+// asUint64 converts x, which must be an unsigned integer, to a uint64
+// suitable for use as a bitwise shift count.
+func asUint64(x value) uint64 {
+ switch x := x.(type) {
+ case uint:
+ return uint64(x)
+ case uint8:
+ return uint64(x)
+ case uint16:
+ return uint64(x)
+ case uint32:
+ return uint64(x)
+ case uint64:
+ return x
+ case uintptr:
+ return uint64(x)
+ }
+ panic(fmt.Sprintf("cannot convert %T to uint64", x))
+}
+
+// zero returns a new "zero" value of the specified type.
+func zero(t types.Type) value {
+ switch t := t.(type) {
+ case *types.Basic:
+ if t.Kind() == types.UntypedNil {
+ panic("untyped nil has no zero value")
+ }
+ if t.Info()&types.IsUntyped != 0 {
+ // TODO(adonovan): make it an invariant that
+ // this is unreachable. Currently some
+ // constants have 'untyped' types when they
+ // should be defaulted by the typechecker.
+ t = ssa.DefaultType(t).(*types.Basic)
+ }
+ switch t.Kind() {
+ case types.Bool:
+ return false
+ case types.Int:
+ return int(0)
+ case types.Int8:
+ return int8(0)
+ case types.Int16:
+ return int16(0)
+ case types.Int32:
+ return int32(0)
+ case types.Int64:
+ return int64(0)
+ case types.Uint:
+ return uint(0)
+ case types.Uint8:
+ return uint8(0)
+ case types.Uint16:
+ return uint16(0)
+ case types.Uint32:
+ return uint32(0)
+ case types.Uint64:
+ return uint64(0)
+ case types.Uintptr:
+ return uintptr(0)
+ case types.Float32:
+ return float32(0)
+ case types.Float64:
+ return float64(0)
+ case types.Complex64:
+ return complex64(0)
+ case types.Complex128:
+ return complex128(0)
+ case types.String:
+ return ""
+ case types.UnsafePointer:
+ return unsafe.Pointer(nil)
+ default:
+ panic(fmt.Sprint("zero for unexpected type:", t))
+ }
+ case *types.Pointer:
+ return (*value)(nil)
+ case *types.Array:
+ a := make(array, t.Len())
+ for i := range a {
+ a[i] = zero(t.Elem())
+ }
+ return a
+ case *types.Named:
+ return zero(t.Underlying())
+ case *types.Interface:
+ return iface{} // nil type, methodset and value
+ case *types.Slice:
+ return []value(nil)
+ case *types.Struct:
+ s := make(structure, t.NumFields())
+ for i := range s {
+ s[i] = zero(t.Field(i).Type())
+ }
+ return s
+ case *types.Tuple:
+ if t.Len() == 1 {
+ return zero(t.At(0).Type())
+ }
+ s := make(tuple, t.Len())
+ for i := range s {
+ s[i] = zero(t.At(i).Type())
+ }
+ return s
+ case *types.Chan:
+ return chan value(nil)
+ case *types.Map:
+ if usesBuiltinMap(t.Key()) {
+ return map[value]value(nil)
+ }
+ return (*hashmap)(nil)
+ case *types.Signature:
+ return (*ssa.Function)(nil)
+ }
+ panic(fmt.Sprint("zero: unexpected ", t))
+}
+
+// slice returns x[lo:hi:max]. Any of lo, hi and max may be nil.
+func slice(x, lo, hi, max value) value {
+ var Len, Cap int
+ switch x := x.(type) {
+ case string:
+ Len = len(x)
+ case []value:
+ Len = len(x)
+ Cap = cap(x)
+ case *value: // *array
+ a := (*x).(array)
+ Len = len(a)
+ Cap = cap(a)
+ }
+
+ l := 0
+ if lo != nil {
+ l = asInt(lo)
+ }
+
+ h := Len
+ if hi != nil {
+ h = asInt(hi)
+ }
+
+ m := Cap
+ if max != nil {
+ m = asInt(max)
+ }
+
+ switch x := x.(type) {
+ case string:
+ return x[l:h]
+ case []value:
+ return x[l:h:m]
+ case *value: // *array
+ a := (*x).(array)
+ return []value(a)[l:h:m]
+ }
+ panic(fmt.Sprintf("slice: unexpected X type: %T", x))
+}
+
+// lookup returns x[idx] where x is a map or string.
+func lookup(instr *ssa.Lookup, x, idx value) value {
+ switch x := x.(type) { // map or string
+ case map[value]value, *hashmap:
+ var v value
+ var ok bool
+ switch x := x.(type) {
+ case map[value]value:
+ v, ok = x[idx]
+ case *hashmap:
+ v = x.lookup(idx.(hashable))
+ ok = v != nil
+ }
+ if ok {
+ v = copyVal(v)
+ } else {
+ v = zero(instr.X.Type().Underlying().(*types.Map).Elem())
+ }
+ if instr.CommaOk {
+ v = tuple{v, ok}
+ }
+ return v
+ case string:
+ return x[asInt(idx)]
+ }
+ panic(fmt.Sprintf("unexpected x type in Lookup: %T", x))
+}
+
+// binop implements all arithmetic and logical binary operators for
+// numeric datatypes and strings. Both operands must have identical
+// dynamic type.
+//
+func binop(op token.Token, t types.Type, x, y value) value {
+ switch op {
+ case token.ADD:
+ switch x.(type) {
+ case int:
+ return x.(int) + y.(int)
+ case int8:
+ return x.(int8) + y.(int8)
+ case int16:
+ return x.(int16) + y.(int16)
+ case int32:
+ return x.(int32) + y.(int32)
+ case int64:
+ return x.(int64) + y.(int64)
+ case uint:
+ return x.(uint) + y.(uint)
+ case uint8:
+ return x.(uint8) + y.(uint8)
+ case uint16:
+ return x.(uint16) + y.(uint16)
+ case uint32:
+ return x.(uint32) + y.(uint32)
+ case uint64:
+ return x.(uint64) + y.(uint64)
+ case uintptr:
+ return x.(uintptr) + y.(uintptr)
+ case float32:
+ return x.(float32) + y.(float32)
+ case float64:
+ return x.(float64) + y.(float64)
+ case complex64:
+ return x.(complex64) + y.(complex64)
+ case complex128:
+ return x.(complex128) + y.(complex128)
+ case string:
+ return x.(string) + y.(string)
+ }
+
+ case token.SUB:
+ switch x.(type) {
+ case int:
+ return x.(int) - y.(int)
+ case int8:
+ return x.(int8) - y.(int8)
+ case int16:
+ return x.(int16) - y.(int16)
+ case int32:
+ return x.(int32) - y.(int32)
+ case int64:
+ return x.(int64) - y.(int64)
+ case uint:
+ return x.(uint) - y.(uint)
+ case uint8:
+ return x.(uint8) - y.(uint8)
+ case uint16:
+ return x.(uint16) - y.(uint16)
+ case uint32:
+ return x.(uint32) - y.(uint32)
+ case uint64:
+ return x.(uint64) - y.(uint64)
+ case uintptr:
+ return x.(uintptr) - y.(uintptr)
+ case float32:
+ return x.(float32) - y.(float32)
+ case float64:
+ return x.(float64) - y.(float64)
+ case complex64:
+ return x.(complex64) - y.(complex64)
+ case complex128:
+ return x.(complex128) - y.(complex128)
+ }
+
+ case token.MUL:
+ switch x.(type) {
+ case int:
+ return x.(int) * y.(int)
+ case int8:
+ return x.(int8) * y.(int8)
+ case int16:
+ return x.(int16) * y.(int16)
+ case int32:
+ return x.(int32) * y.(int32)
+ case int64:
+ return x.(int64) * y.(int64)
+ case uint:
+ return x.(uint) * y.(uint)
+ case uint8:
+ return x.(uint8) * y.(uint8)
+ case uint16:
+ return x.(uint16) * y.(uint16)
+ case uint32:
+ return x.(uint32) * y.(uint32)
+ case uint64:
+ return x.(uint64) * y.(uint64)
+ case uintptr:
+ return x.(uintptr) * y.(uintptr)
+ case float32:
+ return x.(float32) * y.(float32)
+ case float64:
+ return x.(float64) * y.(float64)
+ case complex64:
+ return x.(complex64) * y.(complex64)
+ case complex128:
+ return x.(complex128) * y.(complex128)
+ }
+
+ case token.QUO:
+ switch x.(type) {
+ case int:
+ return x.(int) / y.(int)
+ case int8:
+ return x.(int8) / y.(int8)
+ case int16:
+ return x.(int16) / y.(int16)
+ case int32:
+ return x.(int32) / y.(int32)
+ case int64:
+ return x.(int64) / y.(int64)
+ case uint:
+ return x.(uint) / y.(uint)
+ case uint8:
+ return x.(uint8) / y.(uint8)
+ case uint16:
+ return x.(uint16) / y.(uint16)
+ case uint32:
+ return x.(uint32) / y.(uint32)
+ case uint64:
+ return x.(uint64) / y.(uint64)
+ case uintptr:
+ return x.(uintptr) / y.(uintptr)
+ case float32:
+ return x.(float32) / y.(float32)
+ case float64:
+ return x.(float64) / y.(float64)
+ case complex64:
+ return x.(complex64) / y.(complex64)
+ case complex128:
+ return x.(complex128) / y.(complex128)
+ }
+
+ case token.REM:
+ switch x.(type) {
+ case int:
+ return x.(int) % y.(int)
+ case int8:
+ return x.(int8) % y.(int8)
+ case int16:
+ return x.(int16) % y.(int16)
+ case int32:
+ return x.(int32) % y.(int32)
+ case int64:
+ return x.(int64) % y.(int64)
+ case uint:
+ return x.(uint) % y.(uint)
+ case uint8:
+ return x.(uint8) % y.(uint8)
+ case uint16:
+ return x.(uint16) % y.(uint16)
+ case uint32:
+ return x.(uint32) % y.(uint32)
+ case uint64:
+ return x.(uint64) % y.(uint64)
+ case uintptr:
+ return x.(uintptr) % y.(uintptr)
+ }
+
+ case token.AND:
+ switch x.(type) {
+ case int:
+ return x.(int) & y.(int)
+ case int8:
+ return x.(int8) & y.(int8)
+ case int16:
+ return x.(int16) & y.(int16)
+ case int32:
+ return x.(int32) & y.(int32)
+ case int64:
+ return x.(int64) & y.(int64)
+ case uint:
+ return x.(uint) & y.(uint)
+ case uint8:
+ return x.(uint8) & y.(uint8)
+ case uint16:
+ return x.(uint16) & y.(uint16)
+ case uint32:
+ return x.(uint32) & y.(uint32)
+ case uint64:
+ return x.(uint64) & y.(uint64)
+ case uintptr:
+ return x.(uintptr) & y.(uintptr)
+ }
+
+ case token.OR:
+ switch x.(type) {
+ case int:
+ return x.(int) | y.(int)
+ case int8:
+ return x.(int8) | y.(int8)
+ case int16:
+ return x.(int16) | y.(int16)
+ case int32:
+ return x.(int32) | y.(int32)
+ case int64:
+ return x.(int64) | y.(int64)
+ case uint:
+ return x.(uint) | y.(uint)
+ case uint8:
+ return x.(uint8) | y.(uint8)
+ case uint16:
+ return x.(uint16) | y.(uint16)
+ case uint32:
+ return x.(uint32) | y.(uint32)
+ case uint64:
+ return x.(uint64) | y.(uint64)
+ case uintptr:
+ return x.(uintptr) | y.(uintptr)
+ }
+
+ case token.XOR:
+ switch x.(type) {
+ case int:
+ return x.(int) ^ y.(int)
+ case int8:
+ return x.(int8) ^ y.(int8)
+ case int16:
+ return x.(int16) ^ y.(int16)
+ case int32:
+ return x.(int32) ^ y.(int32)
+ case int64:
+ return x.(int64) ^ y.(int64)
+ case uint:
+ return x.(uint) ^ y.(uint)
+ case uint8:
+ return x.(uint8) ^ y.(uint8)
+ case uint16:
+ return x.(uint16) ^ y.(uint16)
+ case uint32:
+ return x.(uint32) ^ y.(uint32)
+ case uint64:
+ return x.(uint64) ^ y.(uint64)
+ case uintptr:
+ return x.(uintptr) ^ y.(uintptr)
+ }
+
+ case token.AND_NOT:
+ switch x.(type) {
+ case int:
+ return x.(int) &^ y.(int)
+ case int8:
+ return x.(int8) &^ y.(int8)
+ case int16:
+ return x.(int16) &^ y.(int16)
+ case int32:
+ return x.(int32) &^ y.(int32)
+ case int64:
+ return x.(int64) &^ y.(int64)
+ case uint:
+ return x.(uint) &^ y.(uint)
+ case uint8:
+ return x.(uint8) &^ y.(uint8)
+ case uint16:
+ return x.(uint16) &^ y.(uint16)
+ case uint32:
+ return x.(uint32) &^ y.(uint32)
+ case uint64:
+ return x.(uint64) &^ y.(uint64)
+ case uintptr:
+ return x.(uintptr) &^ y.(uintptr)
+ }
+
+ case token.SHL:
+ y := asUint64(y)
+ switch x.(type) {
+ case int:
+ return x.(int) << y
+ case int8:
+ return x.(int8) << y
+ case int16:
+ return x.(int16) << y
+ case int32:
+ return x.(int32) << y
+ case int64:
+ return x.(int64) << y
+ case uint:
+ return x.(uint) << y
+ case uint8:
+ return x.(uint8) << y
+ case uint16:
+ return x.(uint16) << y
+ case uint32:
+ return x.(uint32) << y
+ case uint64:
+ return x.(uint64) << y
+ case uintptr:
+ return x.(uintptr) << y
+ }
+
+ case token.SHR:
+ y := asUint64(y)
+ switch x.(type) {
+ case int:
+ return x.(int) >> y
+ case int8:
+ return x.(int8) >> y
+ case int16:
+ return x.(int16) >> y
+ case int32:
+ return x.(int32) >> y
+ case int64:
+ return x.(int64) >> y
+ case uint:
+ return x.(uint) >> y
+ case uint8:
+ return x.(uint8) >> y
+ case uint16:
+ return x.(uint16) >> y
+ case uint32:
+ return x.(uint32) >> y
+ case uint64:
+ return x.(uint64) >> y
+ case uintptr:
+ return x.(uintptr) >> y
+ }
+
+ case token.LSS:
+ switch x.(type) {
+ case int:
+ return x.(int) < y.(int)
+ case int8:
+ return x.(int8) < y.(int8)
+ case int16:
+ return x.(int16) < y.(int16)
+ case int32:
+ return x.(int32) < y.(int32)
+ case int64:
+ return x.(int64) < y.(int64)
+ case uint:
+ return x.(uint) < y.(uint)
+ case uint8:
+ return x.(uint8) < y.(uint8)
+ case uint16:
+ return x.(uint16) < y.(uint16)
+ case uint32:
+ return x.(uint32) < y.(uint32)
+ case uint64:
+ return x.(uint64) < y.(uint64)
+ case uintptr:
+ return x.(uintptr) < y.(uintptr)
+ case float32:
+ return x.(float32) < y.(float32)
+ case float64:
+ return x.(float64) < y.(float64)
+ case string:
+ return x.(string) < y.(string)
+ }
+
+ case token.LEQ:
+ switch x.(type) {
+ case int:
+ return x.(int) <= y.(int)
+ case int8:
+ return x.(int8) <= y.(int8)
+ case int16:
+ return x.(int16) <= y.(int16)
+ case int32:
+ return x.(int32) <= y.(int32)
+ case int64:
+ return x.(int64) <= y.(int64)
+ case uint:
+ return x.(uint) <= y.(uint)
+ case uint8:
+ return x.(uint8) <= y.(uint8)
+ case uint16:
+ return x.(uint16) <= y.(uint16)
+ case uint32:
+ return x.(uint32) <= y.(uint32)
+ case uint64:
+ return x.(uint64) <= y.(uint64)
+ case uintptr:
+ return x.(uintptr) <= y.(uintptr)
+ case float32:
+ return x.(float32) <= y.(float32)
+ case float64:
+ return x.(float64) <= y.(float64)
+ case string:
+ return x.(string) <= y.(string)
+ }
+
+ case token.EQL:
+ return eqnil(t, x, y)
+
+ case token.NEQ:
+ return !eqnil(t, x, y)
+
+ case token.GTR:
+ switch x.(type) {
+ case int:
+ return x.(int) > y.(int)
+ case int8:
+ return x.(int8) > y.(int8)
+ case int16:
+ return x.(int16) > y.(int16)
+ case int32:
+ return x.(int32) > y.(int32)
+ case int64:
+ return x.(int64) > y.(int64)
+ case uint:
+ return x.(uint) > y.(uint)
+ case uint8:
+ return x.(uint8) > y.(uint8)
+ case uint16:
+ return x.(uint16) > y.(uint16)
+ case uint32:
+ return x.(uint32) > y.(uint32)
+ case uint64:
+ return x.(uint64) > y.(uint64)
+ case uintptr:
+ return x.(uintptr) > y.(uintptr)
+ case float32:
+ return x.(float32) > y.(float32)
+ case float64:
+ return x.(float64) > y.(float64)
+ case string:
+ return x.(string) > y.(string)
+ }
+
+ case token.GEQ:
+ switch x.(type) {
+ case int:
+ return x.(int) >= y.(int)
+ case int8:
+ return x.(int8) >= y.(int8)
+ case int16:
+ return x.(int16) >= y.(int16)
+ case int32:
+ return x.(int32) >= y.(int32)
+ case int64:
+ return x.(int64) >= y.(int64)
+ case uint:
+ return x.(uint) >= y.(uint)
+ case uint8:
+ return x.(uint8) >= y.(uint8)
+ case uint16:
+ return x.(uint16) >= y.(uint16)
+ case uint32:
+ return x.(uint32) >= y.(uint32)
+ case uint64:
+ return x.(uint64) >= y.(uint64)
+ case uintptr:
+ return x.(uintptr) >= y.(uintptr)
+ case float32:
+ return x.(float32) >= y.(float32)
+ case float64:
+ return x.(float64) >= y.(float64)
+ case string:
+ return x.(string) >= y.(string)
+ }
+ }
+ panic(fmt.Sprintf("invalid binary op: %T %s %T", x, op, y))
+}
+
+// eqnil returns the comparison x == y using the equivalence relation
+// appropriate for type t.
+// If t is a reference type, at most one of x or y may be a nil value
+// of that type.
+//
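+// For example, two non-nil values of such a type are never compared
+// element-wise: the cases below compare only the nil-ness of the two
+// operands, since Go permits comparing a map, function or slice value
+// only against nil.
+//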
+func eqnil(t types.Type, x, y value) bool {
+ switch t.Underlying().(type) {
+ case *types.Map, *types.Signature, *types.Slice:
+ // Since these types don't support comparison,
+ // one of the operands must be a literal nil.
+ switch x := x.(type) {
+ case *hashmap:
+ return (x != nil) == (y.(*hashmap) != nil)
+ case map[value]value:
+ return (x != nil) == (y.(map[value]value) != nil)
+ case *ssa.Function:
+ switch y := y.(type) {
+ case *ssa.Function:
+ return (x != nil) == (y != nil)
+ case *closure:
+ return true
+ }
+ case *closure:
+ return (x != nil) == (y.(*ssa.Function) != nil)
+ case []value:
+ return (x != nil) == (y.([]value) != nil)
+ }
+ panic(fmt.Sprintf("eqnil(%s): illegal dynamic type: %T", t, x))
+ }
+
+ return equals(t, x, y)
+}
+
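+// unop interprets the unary operation instr applied to operand x.
+// token.ARROW denotes a channel receive, token.MUL a pointer load,
+// token.SUB numeric negation, token.NOT logical negation, and token.XOR
+// bitwise complement.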
+func unop(instr *ssa.UnOp, x value) value {
+ switch instr.Op {
+ case token.ARROW: // receive
+ v, ok := <-x.(chan value)
+ if !ok {
+ v = zero(instr.X.Type().Underlying().(*types.Chan).Elem())
+ }
+ if instr.CommaOk {
+ v = tuple{v, ok}
+ }
+ return v
+ case token.SUB:
+ switch x := x.(type) {
+ case int:
+ return -x
+ case int8:
+ return -x
+ case int16:
+ return -x
+ case int32:
+ return -x
+ case int64:
+ return -x
+ case uint:
+ return -x
+ case uint8:
+ return -x
+ case uint16:
+ return -x
+ case uint32:
+ return -x
+ case uint64:
+ return -x
+ case uintptr:
+ return -x
+ case float32:
+ return -x
+ case float64:
+ return -x
+ case complex64:
+ return -x
+ case complex128:
+ return -x
+ }
+ case token.MUL:
+ return copyVal(*x.(*value)) // load
+ case token.NOT:
+ return !x.(bool)
+ case token.XOR:
+ switch x := x.(type) {
+ case int:
+ return ^x
+ case int8:
+ return ^x
+ case int16:
+ return ^x
+ case int32:
+ return ^x
+ case int64:
+ return ^x
+ case uint:
+ return ^x
+ case uint8:
+ return ^x
+ case uint16:
+ return ^x
+ case uint32:
+ return ^x
+ case uint64:
+ return ^x
+ case uintptr:
+ return ^x
+ }
+ }
+ panic(fmt.Sprintf("invalid unary op %s %T", instr.Op, x))
+}
+
+// typeAssert checks whether dynamic type of itf is instr.AssertedType.
+// It returns the extracted value on success, and panics on failure,
+// unless instr.CommaOk, in which case it always returns a "value,ok" tuple.
+//
+func typeAssert(i *interpreter, instr *ssa.TypeAssert, itf iface) value {
+ var v value
+ err := ""
+ if itf.t == nil {
+ err = fmt.Sprintf("interface conversion: interface is nil, not %s", instr.AssertedType)
+
+ } else if idst, ok := instr.AssertedType.Underlying().(*types.Interface); ok {
+ v = itf
+ err = checkInterface(i, idst, itf)
+
+ } else if types.Identical(itf.t, instr.AssertedType) {
+ v = copyVal(itf.v) // extract value
+
+ } else {
+ err = fmt.Sprintf("interface conversion: interface is %s, not %s", itf.t, instr.AssertedType)
+ }
+
+ if err != "" {
+ if !instr.CommaOk {
+ panic(err)
+ }
+ return tuple{zero(instr.AssertedType), false}
+ }
+ if instr.CommaOk {
+ return tuple{v, true}
+ }
+ return v
+}
+
+// If CapturedOutput is non-nil, all writes by the interpreted program
+// to file descriptors 1 and 2 will also be written to CapturedOutput.
+//
+// (The $GOROOT/test system requires that the test be considered a
+// failure if "BUG" appears in the combined stdout/stderr output, even
+// if it exits zero. This is a global variable shared by all
+// interpreters in the same process.)
+//
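+// A test driver typically points CapturedOutput at a fresh bytes.Buffer
+// before running the interpreted program and scans the combined output
+// when it returns.
+//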
+var CapturedOutput *bytes.Buffer
+var capturedOutputMu sync.Mutex
+
+// write writes bytes b to the target program's file descriptor fd.
+// The print/println built-ins and the write() system call funnel
+// through here so they can be captured by the test driver.
+func write(fd int, b []byte) (int, error) {
+ // TODO(adonovan): fix: on Windows, std{out,err} are not 1, 2.
+ if CapturedOutput != nil && (fd == 1 || fd == 2) {
+ capturedOutputMu.Lock()
+ CapturedOutput.Write(b) // ignore errors
+ capturedOutputMu.Unlock()
+ }
+ return syswrite(fd, b)
+}
+
+// callBuiltin interprets a call to builtin fn with arguments args,
+// returning its result.
+func callBuiltin(caller *frame, callpos token.Pos, fn *ssa.Builtin, args []value) value {
+ switch fn.Name() {
+ case "append":
+ if len(args) == 1 {
+ return args[0]
+ }
+ if s, ok := args[1].(string); ok {
+ // append([]byte, ...string) []byte
+ arg0 := args[0].([]value)
+ for i := 0; i < len(s); i++ {
+ arg0 = append(arg0, s[i])
+ }
+ return arg0
+ }
+ // append([]T, ...[]T) []T
+ return append(args[0].([]value), args[1].([]value)...)
+
+ case "copy": // copy([]T, []T) int or copy([]byte, string) int
+ src := args[1]
+ if _, ok := src.(string); ok {
+ params := fn.Type().(*types.Signature).Params()
+ src = conv(params.At(0).Type(), params.At(1).Type(), src)
+ }
+ return copy(args[0].([]value), src.([]value))
+
+ case "close": // close(chan T)
+ close(args[0].(chan value))
+ return nil
+
+ case "delete": // delete(map[K]value, K)
+ switch m := args[0].(type) {
+ case map[value]value:
+ delete(m, args[1])
+ case *hashmap:
+ m.delete(args[1].(hashable))
+ default:
+ panic(fmt.Sprintf("illegal map type: %T", m))
+ }
+ return nil
+
+ case "print", "println": // print(any, ...)
+ ln := fn.Name() == "println"
+ var buf bytes.Buffer
+ for i, arg := range args {
+ if i > 0 && ln {
+ buf.WriteRune(' ')
+ }
+ buf.WriteString(toString(arg))
+ }
+ if ln {
+ buf.WriteRune('\n')
+ }
+ write(1, buf.Bytes())
+ return nil
+
+ case "len":
+ switch x := args[0].(type) {
+ case string:
+ return len(x)
+ case array:
+ return len(x)
+ case *value:
+ return len((*x).(array))
+ case []value:
+ return len(x)
+ case map[value]value:
+ return len(x)
+ case *hashmap:
+ return x.len()
+ case chan value:
+ return len(x)
+ default:
+ panic(fmt.Sprintf("len: illegal operand: %T", x))
+ }
+
+ case "cap":
+ switch x := args[0].(type) {
+ case array:
+ return cap(x)
+ case *value:
+ return cap((*x).(array))
+ case []value:
+ return cap(x)
+ case chan value:
+ return cap(x)
+ default:
+ panic(fmt.Sprintf("cap: illegal operand: %T", x))
+ }
+
+ case "real":
+ switch c := args[0].(type) {
+ case complex64:
+ return real(c)
+ case complex128:
+ return real(c)
+ default:
+ panic(fmt.Sprintf("real: illegal operand: %T", c))
+ }
+
+ case "imag":
+ switch c := args[0].(type) {
+ case complex64:
+ return imag(c)
+ case complex128:
+ return imag(c)
+ default:
+ panic(fmt.Sprintf("imag: illegal operand: %T", c))
+ }
+
+ case "complex":
+ switch f := args[0].(type) {
+ case float32:
+ return complex(f, args[1].(float32))
+ case float64:
+ return complex(f, args[1].(float64))
+ default:
+ panic(fmt.Sprintf("complex: illegal operand: %T", f))
+ }
+
+ case "panic":
+ // ssa.Panic handles most cases; this is only for "go
+ // panic" or "defer panic".
+ panic(targetPanic{args[0]})
+
+ case "recover":
+ return doRecover(caller)
+
+ case "ssa:wrapnilchk":
+ recv := args[0]
+ if recv.(*value) == nil {
+ recvType := args[1]
+ methodName := args[2]
+ panic(fmt.Sprintf("value method (%s).%s called using nil *%s pointer",
+ recvType, methodName, recvType))
+ }
+ return recv
+ }
+
+ panic("unknown built-in: " + fn.Name())
+}
+
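+// rangeIter returns an iterator for a 'range' loop over the map or
+// string value x.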
+func rangeIter(x value, t types.Type) iter {
+ switch x := x.(type) {
+ case map[value]value:
+ // TODO(adonovan): fix: leaks goroutines and channels
+ // on each incomplete map iteration. We need to open
+ // up an iteration interface using the
+ // reflect.(Value).MapKeys machinery.
+ it := make(mapIter)
+ go func() {
+ for k, v := range x {
+ it <- [2]value{k, v}
+ }
+ close(it)
+ }()
+ return it
+ case *hashmap:
+ // TODO(adonovan): fix: leaks goroutines and channels
+ // on each incomplete map iteration. We need to open
+ // up an iteration interface using the
+ // reflect.(Value).MapKeys machinery.
+ it := make(mapIter)
+ go func() {
+ for _, e := range x.table {
+ for e != nil {
+ it <- [2]value{e.key, e.value}
+ e = e.next
+ }
+ }
+ close(it)
+ }()
+ return it
+ case string:
+ return &stringIter{Reader: strings.NewReader(x)}
+ }
+ panic(fmt.Sprintf("cannot range over %T", x))
+}
+
+// widen widens a basic typed value x to the widest type of its
+// category, one of:
+// bool, int64, uint64, float64, complex128, string.
+// This is inefficient but reduces the size of the cross-product of
+// cases we have to consider.
+//
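+// For example, widen(int16(-3)) returns int64(-3) and widen(float32(1.5))
+// returns float64(1.5); values that are already of a widest type are
+// returned unchanged.
+//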
+func widen(x value) value {
+ switch y := x.(type) {
+ case bool, int64, uint64, float64, complex128, string, unsafe.Pointer:
+ return x
+ case int:
+ return int64(y)
+ case int8:
+ return int64(y)
+ case int16:
+ return int64(y)
+ case int32:
+ return int64(y)
+ case uint:
+ return uint64(y)
+ case uint8:
+ return uint64(y)
+ case uint16:
+ return uint64(y)
+ case uint32:
+ return uint64(y)
+ case uintptr:
+ return uint64(y)
+ case float32:
+ return float64(y)
+ case complex64:
+ return complex128(y)
+ }
+ panic(fmt.Sprintf("cannot widen %T", x))
+}
+
+// conv converts the value x of type t_src to type t_dst and returns
+// the result.
+// Possible cases are described with the ssa.Convert operator.
+//
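+// For example, converting a []byte operand to string concatenates the
+// byte elements into a Go string, while a conversion between numeric
+// types first widens the operand (see widen) and then converts it to the
+// destination kind.
+//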
+func conv(t_dst, t_src types.Type, x value) value {
+ ut_src := t_src.Underlying()
+ ut_dst := t_dst.Underlying()
+
+ // Destination type is not an "untyped" type.
+ if b, ok := ut_dst.(*types.Basic); ok && b.Info()&types.IsUntyped != 0 {
+ panic("oops: conversion to 'untyped' type: " + b.String())
+ }
+
+ // Nor is it an interface type.
+ if _, ok := ut_dst.(*types.Interface); ok {
+ if _, ok := ut_src.(*types.Interface); ok {
+ panic("oops: Convert should be ChangeInterface")
+ } else {
+ panic("oops: Convert should be MakeInterface")
+ }
+ }
+
+ // Remaining conversions:
+ // + untyped string/number/bool constant to a specific
+ // representation.
+ // + conversions between non-complex numeric types.
+ // + conversions between complex numeric types.
+ // + integer/[]byte/[]rune -> string.
+ // + string -> []byte/[]rune.
+ //
+ // All are treated the same: first we extract the value to the
+ // widest representation (int64, uint64, float64, complex128,
+ // or string), then we convert it to the desired type.
+
+ switch ut_src := ut_src.(type) {
+ case *types.Pointer:
+ switch ut_dst := ut_dst.(type) {
+ case *types.Basic:
+ // *value to unsafe.Pointer?
+ if ut_dst.Kind() == types.UnsafePointer {
+ return unsafe.Pointer(x.(*value))
+ }
+ }
+
+ case *types.Slice:
+ // []byte or []rune -> string
+ // TODO(adonovan): fix: type B byte; conv([]B -> string).
+ switch ut_src.Elem().(*types.Basic).Kind() {
+ case types.Byte:
+ x := x.([]value)
+ b := make([]byte, 0, len(x))
+ for i := range x {
+ b = append(b, x[i].(byte))
+ }
+ return string(b)
+
+ case types.Rune:
+ x := x.([]value)
+ r := make([]rune, 0, len(x))
+ for i := range x {
+ r = append(r, x[i].(rune))
+ }
+ return string(r)
+ }
+
+ case *types.Basic:
+ x = widen(x)
+
+ // integer -> string?
+ // TODO(adonovan): fix: test integer -> named alias of string.
+ if ut_src.Info()&types.IsInteger != 0 {
+ if ut_dst, ok := ut_dst.(*types.Basic); ok && ut_dst.Kind() == types.String {
+ return string(asInt(x))
+ }
+ }
+
+ // string -> []rune, []byte or string?
+ if s, ok := x.(string); ok {
+ switch ut_dst := ut_dst.(type) {
+ case *types.Slice:
+ var res []value
+ // TODO(adonovan): fix: test named alias of rune, byte.
+ switch ut_dst.Elem().(*types.Basic).Kind() {
+ case types.Rune:
+ for _, r := range []rune(s) {
+ res = append(res, r)
+ }
+ return res
+ case types.Byte:
+ for _, b := range []byte(s) {
+ res = append(res, b)
+ }
+ return res
+ }
+ case *types.Basic:
+ if ut_dst.Kind() == types.String {
+ return x.(string)
+ }
+ }
+ break // fail: no other conversions for string
+ }
+
+ // unsafe.Pointer -> *value
+ if ut_src.Kind() == types.UnsafePointer {
+ // TODO(adonovan): this is wrong and cannot
+ // really be fixed with the current design.
+ //
+ // return (*value)(x.(unsafe.Pointer))
+ // creates a new pointer of a different
+ // type but the underlying interface value
+ // knows its "true" type and so cannot be
+ // meaningfully used through the new pointer.
+ //
+ // To make this work, the interpreter needs to
+ // simulate the memory layout of a real
+ // compiled implementation.
+ //
+ // To at least preserve type-safety, we'll
+ // just return the zero value of the
+ // destination type.
+ return zero(t_dst)
+ }
+
+ // Conversions between complex numeric types?
+ if ut_src.Info()&types.IsComplex != 0 {
+ switch ut_dst.(*types.Basic).Kind() {
+ case types.Complex64:
+ return complex64(x.(complex128))
+ case types.Complex128:
+ return x.(complex128)
+ }
+ break // fail: no other conversions for complex
+ }
+
+ // Conversions between non-complex numeric types?
+ if ut_src.Info()&types.IsNumeric != 0 {
+ kind := ut_dst.(*types.Basic).Kind()
+ switch x := x.(type) {
+ case int64: // signed integer -> numeric?
+ switch kind {
+ case types.Int:
+ return int(x)
+ case types.Int8:
+ return int8(x)
+ case types.Int16:
+ return int16(x)
+ case types.Int32:
+ return int32(x)
+ case types.Int64:
+ return int64(x)
+ case types.Uint:
+ return uint(x)
+ case types.Uint8:
+ return uint8(x)
+ case types.Uint16:
+ return uint16(x)
+ case types.Uint32:
+ return uint32(x)
+ case types.Uint64:
+ return uint64(x)
+ case types.Uintptr:
+ return uintptr(x)
+ case types.Float32:
+ return float32(x)
+ case types.Float64:
+ return float64(x)
+ }
+
+ case uint64: // unsigned integer -> numeric?
+ switch kind {
+ case types.Int:
+ return int(x)
+ case types.Int8:
+ return int8(x)
+ case types.Int16:
+ return int16(x)
+ case types.Int32:
+ return int32(x)
+ case types.Int64:
+ return int64(x)
+ case types.Uint:
+ return uint(x)
+ case types.Uint8:
+ return uint8(x)
+ case types.Uint16:
+ return uint16(x)
+ case types.Uint32:
+ return uint32(x)
+ case types.Uint64:
+ return uint64(x)
+ case types.Uintptr:
+ return uintptr(x)
+ case types.Float32:
+ return float32(x)
+ case types.Float64:
+ return float64(x)
+ }
+
+ case float64: // floating point -> numeric?
+ switch kind {
+ case types.Int:
+ return int(x)
+ case types.Int8:
+ return int8(x)
+ case types.Int16:
+ return int16(x)
+ case types.Int32:
+ return int32(x)
+ case types.Int64:
+ return int64(x)
+ case types.Uint:
+ return uint(x)
+ case types.Uint8:
+ return uint8(x)
+ case types.Uint16:
+ return uint16(x)
+ case types.Uint32:
+ return uint32(x)
+ case types.Uint64:
+ return uint64(x)
+ case types.Uintptr:
+ return uintptr(x)
+ case types.Float32:
+ return float32(x)
+ case types.Float64:
+ return float64(x)
+ }
+ }
+ }
+ }
+
+ panic(fmt.Sprintf("unsupported conversion: %s -> %s, dynamic type %T", t_src, t_dst, x))
+}
+
+// checkInterface checks that the method set of x implements the
+// interface itype.
+// On success it returns "", on failure, an error message.
+//
+func checkInterface(i *interpreter, itype *types.Interface, x iface) string {
+ if meth, _ := types.MissingMethod(x.t, itype, true); meth != nil {
+ return fmt.Sprintf("interface conversion: %v is not %v: missing method %s",
+ x.t, itype, meth.Name())
+ }
+ return "" // ok
+}
diff --git a/llgo/third_party/go.tools/go/ssa/interp/reflect.go b/llgo/third_party/go.tools/go/ssa/interp/reflect.go
new file mode 100644
index 0000000000000000000000000000000000000000..4acba617469d345ce347bcb0092f4f0da62ab9e7
--- /dev/null
+++ b/llgo/third_party/go.tools/go/ssa/interp/reflect.go
@@ -0,0 +1,521 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package interp
+
+// Emulated "reflect" package.
+//
+// We completely replace the built-in "reflect" package.
+// The only things clients can depend upon are that reflect.Type is an
+// interface and reflect.Value is an (opaque) struct.
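+//
+// The ext۰reflect۰* functions below provide the implementation, and
+// initReflect registers the method sets of the fake rtype and error
+// types with the interpreter.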
+
+import (
+ "fmt"
+ "go/token"
+ "reflect"
+ "unsafe"
+
+ "llvm.org/llgo/third_party/go.tools/go/ssa"
+ "llvm.org/llgo/third_party/go.tools/go/types"
+)
+
+type opaqueType struct {
+ types.Type
+ name string
+}
+
+func (t *opaqueType) String() string { return t.name }
+
+// A bogus "reflect" type-checker package. Shared across interpreters.
+var reflectTypesPackage = types.NewPackage("reflect", "reflect")
+
+// rtype is the concrete type the interpreter uses to implement the
+// reflect.Type interface. Since its type is opaque to the target
+// language, we use a types.Basic.
+//
+// type rtype
+var rtypeType = makeNamedType("rtype", &opaqueType{nil, "rtype"})
+
+// error is an (interpreted) named type whose underlying type is string.
+// The interpreter uses it for all implementations of the built-in error
+// interface that it creates.
+// We put it in the "reflect" package for expedience.
+//
+// type error string
+var errorType = makeNamedType("error", &opaqueType{nil, "error"})
+
+func makeNamedType(name string, underlying types.Type) *types.Named {
+ obj := types.NewTypeName(token.NoPos, reflectTypesPackage, name, nil)
+ return types.NewNamed(obj, underlying, nil)
+}
+
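+// makeReflectValue returns the interpreter's representation of a
+// reflect.Value: a two-element structure holding the rtype for t and the
+// underlying value v.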
+func makeReflectValue(t types.Type, v value) value {
+ return structure{rtype{t}, v}
+}
+
+// Given a reflect.Value, returns its rtype.
+func rV2T(v value) rtype {
+ return v.(structure)[0].(rtype)
+}
+
+// Given a reflect.Value, returns the underlying interpreter value.
+func rV2V(v value) value {
+ return v.(structure)[1]
+}
+
+// makeReflectType boxes up an rtype in a reflect.Type interface.
+func makeReflectType(rt rtype) value {
+ return iface{rtypeType, rt}
+}
+
+func ext۰reflect۰Init(fr *frame, args []value) value {
+ // Signature: func()
+ return nil
+}
+
+func ext۰reflect۰rtype۰Bits(fr *frame, args []value) value {
+ // Signature: func (t reflect.rtype) int
+ rt := args[0].(rtype).t
+ basic, ok := rt.Underlying().(*types.Basic)
+ if !ok {
+ panic(fmt.Sprintf("reflect.Type.Bits(%T): non-basic type", rt))
+ }
+ return int(fr.i.sizes.Sizeof(basic)) * 8
+}
+
+func ext۰reflect۰rtype۰Elem(fr *frame, args []value) value {
+ // Signature: func (t reflect.rtype) reflect.Type
+ return makeReflectType(rtype{args[0].(rtype).t.Underlying().(interface {
+ Elem() types.Type
+ }).Elem()})
+}
+
+func ext۰reflect۰rtype۰Field(fr *frame, args []value) value {
+ // Signature: func (t reflect.rtype, i int) reflect.StructField
+ st := args[0].(rtype).t.Underlying().(*types.Struct)
+ i := args[1].(int)
+ f := st.Field(i)
+ return structure{
+ f.Name(),
+ f.Pkg().Path(),
+ makeReflectType(rtype{f.Type()}),
+ st.Tag(i),
+ 0, // TODO(adonovan): offset
+ []value{}, // TODO(adonovan): indices
+ f.Anonymous(),
+ }
+}
+
+func ext۰reflect۰rtype۰Kind(fr *frame, args []value) value {
+ // Signature: func (t reflect.rtype) uint
+ return uint(reflectKind(args[0].(rtype).t))
+}
+
+func ext۰reflect۰rtype۰NumField(fr *frame, args []value) value {
+ // Signature: func (t reflect.rtype) int
+ return args[0].(rtype).t.Underlying().(*types.Struct).NumFields()
+}
+
+func ext۰reflect۰rtype۰NumMethod(fr *frame, args []value) value {
+ // Signature: func (t reflect.rtype) int
+ return fr.i.prog.MethodSets.MethodSet(args[0].(rtype).t).Len()
+}
+
+func ext۰reflect۰rtype۰NumOut(fr *frame, args []value) value {
+ // Signature: func (t reflect.rtype) int
+ return args[0].(rtype).t.(*types.Signature).Results().Len()
+}
+
+func ext۰reflect۰rtype۰Out(fr *frame, args []value) value {
+ // Signature: func (t reflect.rtype, i int) reflect.Type
+ i := args[1].(int)
+ return makeReflectType(rtype{args[0].(rtype).t.(*types.Signature).Results().At(i).Type()})
+}
+
+func ext۰reflect۰rtype۰Size(fr *frame, args []value) value {
+ // Signature: func (t reflect.rtype) uintptr
+ return uintptr(fr.i.sizes.Sizeof(args[0].(rtype).t))
+}
+
+func ext۰reflect۰rtype۰String(fr *frame, args []value) value {
+ // Signature: func (t reflect.rtype) string
+ return args[0].(rtype).t.String()
+}
+
+func ext۰reflect۰New(fr *frame, args []value) value {
+ // Signature: func (t reflect.Type) reflect.Value
+ t := args[0].(iface).v.(rtype).t
+ alloc := zero(t)
+ return makeReflectValue(types.NewPointer(t), &alloc)
+}
+
+func ext۰reflect۰TypeOf(fr *frame, args []value) value {
+ // Signature: func (i interface{}) reflect.Type
+ return makeReflectType(rtype{args[0].(iface).t})
+}
+
+func ext۰reflect۰ValueOf(fr *frame, args []value) value {
+ // Signature: func (interface{}) reflect.Value
+ itf := args[0].(iface)
+ return makeReflectValue(itf.t, itf.v)
+}
+
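+// reflectKind returns the reflect.Kind corresponding to type t,
+// resolving Named types through their underlying type.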
+func reflectKind(t types.Type) reflect.Kind {
+ switch t := t.(type) {
+ case *types.Named:
+ return reflectKind(t.Underlying())
+ case *types.Basic:
+ switch t.Kind() {
+ case types.Bool:
+ return reflect.Bool
+ case types.Int:
+ return reflect.Int
+ case types.Int8:
+ return reflect.Int8
+ case types.Int16:
+ return reflect.Int16
+ case types.Int32:
+ return reflect.Int32
+ case types.Int64:
+ return reflect.Int64
+ case types.Uint:
+ return reflect.Uint
+ case types.Uint8:
+ return reflect.Uint8
+ case types.Uint16:
+ return reflect.Uint16
+ case types.Uint32:
+ return reflect.Uint32
+ case types.Uint64:
+ return reflect.Uint64
+ case types.Uintptr:
+ return reflect.Uintptr
+ case types.Float32:
+ return reflect.Float32
+ case types.Float64:
+ return reflect.Float64
+ case types.Complex64:
+ return reflect.Complex64
+ case types.Complex128:
+ return reflect.Complex128
+ case types.String:
+ return reflect.String
+ case types.UnsafePointer:
+ return reflect.UnsafePointer
+ }
+ case *types.Array:
+ return reflect.Array
+ case *types.Chan:
+ return reflect.Chan
+ case *types.Signature:
+ return reflect.Func
+ case *types.Interface:
+ return reflect.Interface
+ case *types.Map:
+ return reflect.Map
+ case *types.Pointer:
+ return reflect.Ptr
+ case *types.Slice:
+ return reflect.Slice
+ case *types.Struct:
+ return reflect.Struct
+ }
+ panic(fmt.Sprint("unexpected type: ", t))
+}
+
+func ext۰reflect۰Value۰Kind(fr *frame, args []value) value {
+ // Signature: func (reflect.Value) uint
+ return uint(reflectKind(rV2T(args[0]).t))
+}
+
+func ext۰reflect۰Value۰String(fr *frame, args []value) value {
+ // Signature: func (reflect.Value) string
+ return toString(rV2V(args[0]))
+}
+
+func ext۰reflect۰Value۰Type(fr *frame, args []value) value {
+ // Signature: func (reflect.Value) reflect.Type
+ return makeReflectType(rV2T(args[0]))
+}
+
+func ext۰reflect۰Value۰Uint(fr *frame, args []value) value {
+ // Signature: func (reflect.Value) uint64
+ switch v := rV2V(args[0]).(type) {
+ case uint:
+ return uint64(v)
+ case uint8:
+ return uint64(v)
+ case uint16:
+ return uint64(v)
+ case uint32:
+ return uint64(v)
+ case uint64:
+ return uint64(v)
+ case uintptr:
+ return uint64(v)
+ }
+ panic("reflect.Value.Uint")
+}
+
+func ext۰reflect۰Value۰Len(fr *frame, args []value) value {
+ // Signature: func (reflect.Value) int
+ switch v := rV2V(args[0]).(type) {
+ case string:
+ return len(v)
+ case array:
+ return len(v)
+ case chan value:
+ return cap(v)
+ case []value:
+ return len(v)
+ case *hashmap:
+ return v.len()
+ case map[value]value:
+ return len(v)
+ default:
+ panic(fmt.Sprintf("reflect.(Value).Len(%v)", v))
+ }
+}
+
+func ext۰reflect۰Value۰MapIndex(fr *frame, args []value) value {
+ // Signature: func (v reflect.Value, key reflect.Value) Value
+ tValue := rV2T(args[0]).t.Underlying().(*types.Map).Key()
+ k := rV2V(args[1])
+ switch m := rV2V(args[0]).(type) {
+ case map[value]value:
+ if v, ok := m[k]; ok {
+ return makeReflectValue(tValue, v)
+ }
+
+ case *hashmap:
+ if v := m.lookup(k.(hashable)); v != nil {
+ return makeReflectValue(tValue, v)
+ }
+
+ default:
+ panic(fmt.Sprintf("(reflect.Value).MapIndex(%T, %T)", m, k))
+ }
+ return makeReflectValue(nil, nil)
+}
+
+func ext۰reflect۰Value۰MapKeys(fr *frame, args []value) value {
+ // Signature: func (reflect.Value) []Value
+ var keys []value
+ tKey := rV2T(args[0]).t.Underlying().(*types.Map).Key()
+ switch v := rV2V(args[0]).(type) {
+ case map[value]value:
+ for k := range v {
+ keys = append(keys, makeReflectValue(tKey, k))
+ }
+
+ case *hashmap:
+ for _, e := range v.table {
+ for ; e != nil; e = e.next {
+ keys = append(keys, makeReflectValue(tKey, e.key))
+ }
+ }
+
+ default:
+ panic(fmt.Sprintf("(reflect.Value).MapKeys(%T)", v))
+ }
+ return keys
+}
+
+func ext۰reflect۰Value۰NumField(fr *frame, args []value) value {
+ // Signature: func (reflect.Value) int
+ return len(rV2V(args[0]).(structure))
+}
+
+func ext۰reflect۰Value۰NumMethod(fr *frame, args []value) value {
+ // Signature: func (reflect.Value) int
+ return fr.i.prog.MethodSets.MethodSet(rV2T(args[0]).t).Len()
+}
+
+func ext۰reflect۰Value۰Pointer(fr *frame, args []value) value {
+ // Signature: func (v reflect.Value) uintptr
+ switch v := rV2V(args[0]).(type) {
+ case *value:
+ return uintptr(unsafe.Pointer(v))
+ case chan value:
+ return reflect.ValueOf(v).Pointer()
+ case []value:
+ return reflect.ValueOf(v).Pointer()
+ case *hashmap:
+ return reflect.ValueOf(v.table).Pointer()
+ case map[value]value:
+ return reflect.ValueOf(v).Pointer()
+ case *ssa.Function:
+ return uintptr(unsafe.Pointer(v))
+ case *closure:
+ return uintptr(unsafe.Pointer(v))
+ default:
+ panic(fmt.Sprintf("reflect.(Value).Pointer(%T)", v))
+ }
+}
+
+func ext۰reflect۰Value۰Index(fr *frame, args []value) value {
+ // Signature: func (v reflect.Value, i int) Value
+ i := args[1].(int)
+ t := rV2T(args[0]).t.Underlying()
+ switch v := rV2V(args[0]).(type) {
+ case array:
+ return makeReflectValue(t.(*types.Array).Elem(), v[i])
+ case []value:
+ return makeReflectValue(t.(*types.Slice).Elem(), v[i])
+ default:
+ panic(fmt.Sprintf("reflect.(Value).Index(%T)", v))
+ }
+}
+
+func ext۰reflect۰Value۰Bool(fr *frame, args []value) value {
+ // Signature: func (reflect.Value) bool
+ return rV2V(args[0]).(bool)
+}
+
+func ext۰reflect۰Value۰CanAddr(fr *frame, args []value) value {
+ // Signature: func (v reflect.Value) bool
+ // Always false for our representation.
+ return false
+}
+
+func ext۰reflect۰Value۰CanInterface(fr *frame, args []value) value {
+ // Signature: func (v reflect.Value) bool
+ // Always true for our representation.
+ return true
+}
+
+func ext۰reflect۰Value۰Elem(fr *frame, args []value) value {
+ // Signature: func (v reflect.Value) reflect.Value
+ switch x := rV2V(args[0]).(type) {
+ case iface:
+ return makeReflectValue(x.t, x.v)
+ case *value:
+ return makeReflectValue(rV2T(args[0]).t.Underlying().(*types.Pointer).Elem(), *x)
+ default:
+ panic(fmt.Sprintf("reflect.(Value).Elem(%T)", x))
+ }
+}
+
+func ext۰reflect۰Value۰Field(fr *frame, args []value) value {
+ // Signature: func (v reflect.Value, i int) reflect.Value
+ v := args[0]
+ i := args[1].(int)
+ return makeReflectValue(rV2T(v).t.Underlying().(*types.Struct).Field(i).Type(), rV2V(v).(structure)[i])
+}
+
+func ext۰reflect۰Value۰Float(fr *frame, args []value) value {
+ // Signature: func (reflect.Value) float64
+ switch v := rV2V(args[0]).(type) {
+ case float32:
+ return float64(v)
+ case float64:
+ return float64(v)
+ }
+ panic("reflect.Value.Float")
+}
+
+func ext۰reflect۰Value۰Interface(fr *frame, args []value) value {
+ // Signature: func (v reflect.Value) interface{}
+ return ext۰reflect۰valueInterface(fr, args)
+}
+
+func ext۰reflect۰Value۰Int(fr *frame, args []value) value {
+ // Signature: func (reflect.Value) int64
+ switch x := rV2V(args[0]).(type) {
+ case int:
+ return int64(x)
+ case int8:
+ return int64(x)
+ case int16:
+ return int64(x)
+ case int32:
+ return int64(x)
+ case int64:
+ return x
+ default:
+ panic(fmt.Sprintf("reflect.(Value).Int(%T)", x))
+ }
+}
+
+func ext۰reflect۰Value۰IsNil(fr *frame, args []value) value {
+ // Signature: func (reflect.Value) bool
+ switch x := rV2V(args[0]).(type) {
+ case *value:
+ return x == nil
+ case chan value:
+ return x == nil
+ case map[value]value:
+ return x == nil
+ case *hashmap:
+ return x == nil
+ case iface:
+ return x.t == nil
+ case []value:
+ return x == nil
+ case *ssa.Function:
+ return x == nil
+ case *ssa.Builtin:
+ return x == nil
+ case *closure:
+ return x == nil
+ default:
+ panic(fmt.Sprintf("reflect.(Value).IsNil(%T)", x))
+ }
+}
+
+func ext۰reflect۰Value۰IsValid(fr *frame, args []value) value {
+ // Signature: func (reflect.Value) bool
+ return rV2V(args[0]) != nil
+}
+
+func ext۰reflect۰Value۰Set(fr *frame, args []value) value {
+ // TODO(adonovan): implement.
+ return nil
+}
+
+func ext۰reflect۰valueInterface(fr *frame, args []value) value {
+ // Signature: func (v reflect.Value, safe bool) interface{}
+ v := args[0].(structure)
+ return iface{rV2T(v).t, rV2V(v)}
+}
+
+func ext۰reflect۰error۰Error(fr *frame, args []value) value {
+ return args[0]
+}
+
+// newMethod creates a new method of the specified name, package and receiver type.
+func newMethod(pkg *ssa.Package, recvType types.Type, name string) *ssa.Function {
+ // TODO(adonovan): fix: hack: currently the only part of Signature
+ // that is needed is the "pointerness" of Recv.Type, and for
+ // now, we'll set it to always be false since we're only
+ // concerned with rtype. Encapsulate this better.
+ sig := types.NewSignature(nil, types.NewVar(token.NoPos, nil, "recv", recvType), nil, nil, false)
+ fn := pkg.Prog.NewFunction(name, sig, "fake reflect method")
+ fn.Pkg = pkg
+ return fn
+}
+
+func initReflect(i *interpreter) {
+ i.reflectPackage = &ssa.Package{
+ Prog: i.prog,
+ Object: reflectTypesPackage,
+ Members: make(map[string]ssa.Member),
+ }
+
+ i.rtypeMethods = methodSet{
+ "Bits": newMethod(i.reflectPackage, rtypeType, "Bits"),
+ "Elem": newMethod(i.reflectPackage, rtypeType, "Elem"),
+ "Field": newMethod(i.reflectPackage, rtypeType, "Field"),
+ "Kind": newMethod(i.reflectPackage, rtypeType, "Kind"),
+ "NumField": newMethod(i.reflectPackage, rtypeType, "NumField"),
+ "NumMethod": newMethod(i.reflectPackage, rtypeType, "NumMethod"),
+ "NumOut": newMethod(i.reflectPackage, rtypeType, "NumOut"),
+ "Out": newMethod(i.reflectPackage, rtypeType, "Out"),
+ "Size": newMethod(i.reflectPackage, rtypeType, "Size"),
+ "String": newMethod(i.reflectPackage, rtypeType, "String"),
+ }
+ i.errorMethods = methodSet{
+ "Error": newMethod(i.reflectPackage, errorType, "Error"),
+ }
+}
diff --git a/llgo/third_party/go.tools/go/ssa/interp/testdata/a_test.go b/llgo/third_party/go.tools/go/ssa/interp/testdata/a_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..844ec5cdc607d2f87a1e431581f7cb75b4945916
--- /dev/null
+++ b/llgo/third_party/go.tools/go/ssa/interp/testdata/a_test.go
@@ -0,0 +1,17 @@
+package a
+
+import "testing"
+
+func TestFoo(t *testing.T) {
+ t.Error("foo")
+}
+
+func TestBar(t *testing.T) {
+ t.Error("bar")
+}
+
+func BenchmarkWiz(b *testing.B) {
+ b.Error("wiz")
+}
+
+// Don't test Examples since the testing package needs pipe(2) for that.
diff --git a/llgo/third_party/go.tools/go/ssa/interp/testdata/b_test.go b/llgo/third_party/go.tools/go/ssa/interp/testdata/b_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..4a30e96a8523ca8ab91dfd128bec3289aa4ab724
--- /dev/null
+++ b/llgo/third_party/go.tools/go/ssa/interp/testdata/b_test.go
@@ -0,0 +1,11 @@
+package b
+
+import "testing"
+
+func NotATest(t *testing.T) {
+ t.Error("foo")
+}
+
+func NotABenchmark(b *testing.B) {
+ b.Error("wiz")
+}
diff --git a/llgo/third_party/go.tools/go/ssa/interp/testdata/boundmeth.go b/llgo/third_party/go.tools/go/ssa/interp/testdata/boundmeth.go
new file mode 100644
index 0000000000000000000000000000000000000000..255cc60703cfa8c4b74eb6b7f9451009f7fa500c
--- /dev/null
+++ b/llgo/third_party/go.tools/go/ssa/interp/testdata/boundmeth.go
@@ -0,0 +1,144 @@
+// Tests of bound method closures.
+
+package main
+
+import "fmt"
+
+func assert(b bool) {
+ if !b {
+ panic("oops")
+ }
+}
+
+type I int
+
+func (i I) add(x int) int {
+ return int(i) + x
+}
+
+func valueReceiver() {
+ var three I = 3
+ assert(three.add(5) == 8)
+ var add3 func(int) int = three.add
+ assert(add3(5) == 8)
+}
+
+type S struct{ x int }
+
+func (s *S) incr() {
+ s.x++
+}
+
+func (s *S) get() int {
+ return s.x
+}
+
+func pointerReceiver() {
+ ps := new(S)
+ incr := ps.incr
+ get := ps.get
+ assert(get() == 0)
+ incr()
+ incr()
+ incr()
+ assert(get() == 3)
+}
+
+func addressibleValuePointerReceiver() {
+ var s S
+ incr := s.incr
+ get := s.get
+ assert(get() == 0)
+ incr()
+ incr()
+ incr()
+ assert(get() == 3)
+}
+
+type S2 struct {
+ S
+}
+
+func promotedReceiver() {
+ var s2 S2
+ incr := s2.incr
+ get := s2.get
+ assert(get() == 0)
+ incr()
+ incr()
+ incr()
+ assert(get() == 3)
+}
+
+func anonStruct() {
+ var s struct{ S }
+ incr := s.incr
+ get := s.get
+ assert(get() == 0)
+ incr()
+ incr()
+ incr()
+ assert(get() == 3)
+}
+
+func typeCheck() {
+ var i interface{}
+ i = (*S).incr
+ _ = i.(func(*S)) // type assertion: receiver type prepended to params
+
+ var s S
+ i = s.incr
+ _ = i.(func()) // type assertion: receiver type disappears
+}
+
+type errString string
+
+func (err errString) Error() string {
+ return string(err)
+}
+
+// Regression test for a builder crash.
+func regress1(x error) func() string {
+ return x.Error
+}
+
+// Regression test for b/7269:
+// taking the value of an interface method performs a nil check.
+func nilInterfaceMethodValue() {
+ err := fmt.Errorf("ok")
+ f := err.Error
+ if got := f(); got != "ok" {
+ panic(got)
+ }
+
+ err = nil
+ if got := f(); got != "ok" {
+ panic(got)
+ }
+
+ defer func() {
+ r := fmt.Sprint(recover())
+ // runtime panic string varies across toolchains
+ if r != "runtime error: interface conversion: interface is nil, not error" &&
+ r != "runtime error: invalid memory address or nil pointer dereference" {
+ panic("want runtime panic from nil interface method value, got " + r)
+ }
+ }()
+ f = err.Error // runtime panic: err is nil
+ panic("unreachable")
+}
+
+func main() {
+ valueReceiver()
+ pointerReceiver()
+ addressibleValuePointerReceiver()
+ promotedReceiver()
+ anonStruct()
+ typeCheck()
+
+ if e := regress1(errString("hi"))(); e != "hi" {
+ panic(e)
+ }
+
+ nilInterfaceMethodValue()
+}
diff --git a/llgo/third_party/go.tools/go/ssa/interp/testdata/callstack.go b/llgo/third_party/go.tools/go/ssa/interp/testdata/callstack.go
new file mode 100644
index 0000000000000000000000000000000000000000..56f3b28124b2445115d6ca7d5b28d8c477b7d731
--- /dev/null
+++ b/llgo/third_party/go.tools/go/ssa/interp/testdata/callstack.go
@@ -0,0 +1,52 @@
+package main
+
+import (
+ "fmt"
+ "path"
+ "runtime"
+ "strings"
+)
+
+var stack string
+
+func f() {
+ pc := make([]uintptr, 6)
+ pc = pc[:runtime.Callers(1, pc)]
+ for _, f := range pc {
+ Func := runtime.FuncForPC(f)
+ name := Func.Name()
+ if strings.Contains(name, "$") || strings.Contains(name, ".func") {
+ name = "func" // anon funcs vary across toolchains
+ }
+ file, line := Func.FileLine(0)
+ stack += fmt.Sprintf("%s at %s:%d\n", name, path.Base(file), line)
+ }
+}
+
+func g() { f() }
+func h() { g() }
+func i() { func() { h() }() }
+
+// Hack: the 'func' and the call to Caller are on the same line,
+// to paper over differences between toolchains.
+// (The interpreter's location info isn't yet complete.)
+func runtimeCaller0() (uintptr, string, int, bool) { return runtime.Caller(0) }
+
+func main() {
+ i()
+ if stack != `main.f at callstack.go:12
+main.g at callstack.go:26
+main.h at callstack.go:27
+func at callstack.go:28
+main.i at callstack.go:28
+main.main at callstack.go:35
+` {
+ panic("unexpected stack: " + stack)
+ }
+
+ pc, file, line, _ := runtimeCaller0()
+ got := fmt.Sprintf("%s @ %s:%d", runtime.FuncForPC(pc).Name(), path.Base(file), line)
+ if got != "main.runtimeCaller0 @ callstack.go:33" {
+ panic("runtime.Caller: " + got)
+ }
+}
diff --git a/llgo/third_party/go.tools/go/ssa/interp/testdata/complit.go b/llgo/third_party/go.tools/go/ssa/interp/testdata/complit.go
new file mode 100644
index 0000000000000000000000000000000000000000..c44fc0068e02dc2ea196dbc7d2a88a93ef21f359
--- /dev/null
+++ b/llgo/third_party/go.tools/go/ssa/interp/testdata/complit.go
@@ -0,0 +1,84 @@
+package main
+
+// Tests of composite literals.
+
+import "fmt"
+
+// Map literals.
+func init() {
+ type M map[int]int
+ m1 := []*M{{1: 1}, &M{2: 2}}
+ want := "map[1:1] map[2:2]"
+ if got := fmt.Sprint(*m1[0], *m1[1]); got != want {
+ panic(got)
+ }
+ m2 := []M{{1: 1}, M{2: 2}}
+ if got := fmt.Sprint(m2[0], m2[1]); got != want {
+ panic(got)
+ }
+}
+
+// Nonliteral keys in composite literal.
+func init() {
+ const zero int = 1
+ var v = []int{1 + zero: 42}
+ if x := fmt.Sprint(v); x != "[0 0 42]" {
+ panic(x)
+ }
+}
+
+// Test for in-place initialization.
+func init() {
+ // struct
+ type S struct {
+ a, b int
+ }
+ s := S{1, 2}
+ s = S{b: 3}
+ if s.a != 0 {
+ panic("s.a != 0")
+ }
+ if s.b != 3 {
+ panic("s.b != 3")
+ }
+ s = S{}
+ if s.a != 0 {
+ panic("s.a != 0")
+ }
+ if s.b != 0 {
+ panic("s.b != 0")
+ }
+
+ // array
+ type A [4]int
+ a := A{2, 4, 6, 8}
+ a = A{1: 6, 2: 4}
+ if a[0] != 0 {
+ panic("a[0] != 0")
+ }
+ if a[1] != 6 {
+ panic("a[1] != 6")
+ }
+ if a[2] != 4 {
+ panic("a[2] != 4")
+ }
+ if a[3] != 0 {
+ panic("a[3] != 0")
+ }
+ a = A{}
+ if a[0] != 0 {
+ panic("a[0] != 0")
+ }
+ if a[1] != 0 {
+ panic("a[1] != 0")
+ }
+ if a[2] != 0 {
+ panic("a[2] != 0")
+ }
+ if a[3] != 0 {
+ panic("a[3] != 0")
+ }
+}
+
+func main() {
+}
diff --git a/llgo/third_party/go.tools/go/ssa/interp/testdata/coverage.go b/llgo/third_party/go.tools/go/ssa/interp/testdata/coverage.go
new file mode 100644
index 0000000000000000000000000000000000000000..ca65643d1e8a93f4147965be12d9adb5ee9a634c
--- /dev/null
+++ b/llgo/third_party/go.tools/go/ssa/interp/testdata/coverage.go
@@ -0,0 +1,496 @@
+// This interpreter test is designed to run very quickly yet provide
+// some coverage of a broad selection of constructs.
+//
+// Validate this file with 'go run' after editing.
+// TODO(adonovan): break this into small files organized by theme.
+
+package main
+
+import (
+ "fmt"
+ "reflect"
+)
+
+func init() {
+ // Call of variadic function with (implicit) empty slice.
+ if x := fmt.Sprint(); x != "" {
+ panic(x)
+ }
+}
+
+type empty interface{}
+
+type I interface {
+ f() int
+}
+
+type T struct{ z int }
+
+func (t T) f() int { return t.z }
+
+func use(interface{}) {}
+
+var counter = 2
+
+// Test initialization, including init blocks containing 'return'.
+// Assertion is in main.
+func init() {
+ counter *= 3
+ return
+ counter *= 3
+}
+
+func init() {
+ counter *= 5
+ return
+ counter *= 5
+}
+
+// Recursion.
+func fib(x int) int {
+ if x < 2 {
+ return x
+ }
+ return fib(x-1) + fib(x-2)
+}
+
+func fibgen(ch chan int) {
+ for x := 0; x < 10; x++ {
+ ch <- fib(x)
+ }
+ close(ch)
+}
+
+// Goroutines and channels.
+func init() {
+ ch := make(chan int)
+ go fibgen(ch)
+ var fibs []int
+ for v := range ch {
+ fibs = append(fibs, v)
+ if len(fibs) == 10 {
+ break
+ }
+ }
+ if x := fmt.Sprint(fibs); x != "[0 1 1 2 3 5 8 13 21 34]" {
+ panic(x)
+ }
+}
+
+// Test of aliasing.
+func init() {
+ type S struct {
+ a, b string
+ }
+
+ s1 := []string{"foo", "bar"}
+ s2 := s1 // creates an alias
+ s2[0] = "wiz"
+ if x := fmt.Sprint(s1, s2); x != "[wiz bar] [wiz bar]" {
+ panic(x)
+ }
+
+ pa1 := &[2]string{"foo", "bar"}
+ pa2 := pa1 // creates an alias
+ (*pa2)[0] = "wiz" // * required to workaround typechecker bug
+ if x := fmt.Sprint(*pa1, *pa2); x != "[wiz bar] [wiz bar]" {
+ panic(x)
+ }
+
+ a1 := [2]string{"foo", "bar"}
+ a2 := a1 // creates a copy
+ a2[0] = "wiz"
+ if x := fmt.Sprint(a1, a2); x != "[foo bar] [wiz bar]" {
+ panic(x)
+ }
+
+ t1 := S{"foo", "bar"}
+ t2 := t1 // copy
+ t2.a = "wiz"
+ if x := fmt.Sprint(t1, t2); x != "{foo bar} {wiz bar}" {
+ panic(x)
+ }
+}
+
+func main() {
+ print() // legal
+
+ if counter != 2*3*5 {
+ panic(counter)
+ }
+
+ // Test builtins (e.g. complex) preserve named argument types.
+ type N complex128
+ var n N
+ n = complex(1.0, 2.0)
+ if n != complex(1.0, 2.0) {
+ panic(n)
+ }
+ if x := reflect.TypeOf(n).String(); x != "main.N" {
+ panic(x)
+ }
+ if real(n) != 1.0 || imag(n) != 2.0 {
+ panic(n)
+ }
+
+ // Channel + select.
+ ch := make(chan int, 1)
+ select {
+ case ch <- 1:
+ // ok
+ default:
+ panic("couldn't send")
+ }
+ if <-ch != 1 {
+ panic("couldn't receive")
+ }
+ // A "receive" select-case that doesn't declare its vars. (regression test)
+ anint := 0
+ ok := false
+ select {
+ case anint, ok = <-ch:
+ case anint = <-ch:
+ default:
+ }
+ _ = anint
+ _ = ok
+
+ // Anon structs with methods.
+ anon := struct{ T }{T: T{z: 1}}
+ if x := anon.f(); x != 1 {
+ panic(x)
+ }
+ var i I = anon
+ if x := i.f(); x != 1 {
+ panic(x)
+ }
+ // NB. precise output of reflect.Type.String is undefined.
+ if x := reflect.TypeOf(i).String(); x != "struct { main.T }" && x != "struct{main.T}" {
+ panic(x)
+ }
+
+ // fmt.
+ const message = "Hello, World!"
+ if fmt.Sprintf("%s, %s!", "Hello", "World") != message {
+ panic("oops")
+ }
+
+ // Type assertion.
+ type S struct {
+ f int
+ }
+ var e empty = S{f: 42}
+ switch v := e.(type) {
+ case S:
+ if v.f != 42 {
+ panic(v.f)
+ }
+ default:
+ panic(reflect.TypeOf(v))
+ }
+ if i, ok := e.(I); ok {
+ panic(i)
+ }
+
+ // Switch.
+ var x int
+ switch x {
+ case 1:
+ panic(x)
+ fallthrough
+ case 2, 3:
+ panic(x)
+ default:
+ // ok
+ }
+ // empty switch
+ switch {
+ }
+ // empty switch
+ switch {
+ default:
+ }
+ // empty switch
+ switch {
+ default:
+ fallthrough
+ case false:
+ }
+
+ // string -> []rune conversion.
+ use([]rune("foo"))
+
+ // Calls of form x.f().
+ type S2 struct {
+ f func() int
+ }
+ S2{f: func() int { return 1 }}.f() // field is a func value
+ T{}.f() // method call
+ i.f() // interface method invocation
+ (interface {
+ f() int
+ }(T{})).f() // anon interface method invocation
+
+ // Map lookup.
+ if v, ok := map[string]string{}["foo5"]; v != "" || ok {
+ panic("oops")
+ }
+
+ // Regression test: implicit address-taken struct literal
+ // inside literal map element.
+ _ = map[int]*struct{}{0: {}}
+}
+
+type mybool bool
+
+func (mybool) f() {}
+
+func init() {
+ type mybool bool
+ var b mybool
+ var i interface{} = b || b // result preserves types of operands
+ _ = i.(mybool)
+
+ i = false && b // result preserves type of "typed" operand
+ _ = i.(mybool)
+
+ i = b || true // result preserves type of "typed" operand
+ _ = i.(mybool)
+}
+
+func init() {
+ var x, y int
+ var b mybool = x == y // x==y is an untyped bool
+ b.f()
+}
+
+// Simple closures.
+func init() {
+ b := 3
+ f := func(a int) int {
+ return a + b
+ }
+ b++
+ if x := f(1); x != 5 { // 1+4 == 5
+ panic(x)
+ }
+ b++
+ if x := f(2); x != 7 { // 2+5 == 7
+ panic(x)
+ }
+ if b := f(1) < 16 || f(2) < 17; !b {
+ panic("oops")
+ }
+}
+
+// Shifts.
+func init() {
+ var i int64 = 1
+ var u uint64 = 1 << 32
+ if x := i << uint32(u); x != 1 {
+ panic(x)
+ }
+ if x := i << uint64(u); x != 0 {
+ panic(x)
+ }
+}
+
+// Implicit conversion of delete() key operand.
+func init() {
+ type I interface{}
+ m := make(map[I]bool)
+ m[1] = true
+ m[I(2)] = true
+ if len(m) != 2 {
+ panic(m)
+ }
+ delete(m, I(1))
+ delete(m, 2)
+ if len(m) != 0 {
+ panic(m)
+ }
+}
+
+// An I->I conversion always succeeds.
+func init() {
+ var x I
+ if I(x) != I(nil) {
+ panic("I->I conversion failed")
+ }
+}
+
+// An I->I type-assert fails iff the value is nil.
+func init() {
+ defer func() {
+ r := fmt.Sprint(recover())
+ // Exact error varies by toolchain.
+ if r != "runtime error: interface conversion: interface is nil, not main.I" &&
+ r != "interface conversion: interface is nil, not main.I" {
+ panic("I->I type assertion succeeded for nil value")
+ }
+ }()
+ var x I
+ _ = x.(I)
+}
+
+//////////////////////////////////////////////////////////////////////
+// Variadic bridge methods and interface thunks.
+
+type VT int
+
+var vcount = 0
+
+func (VT) f(x int, y ...string) {
+ vcount++
+ if x != 1 {
+ panic(x)
+ }
+ if len(y) != 2 || y[0] != "foo" || y[1] != "bar" {
+ panic(y)
+ }
+}
+
+type VS struct {
+ VT
+}
+
+type VI interface {
+ f(x int, y ...string)
+}
+
+func init() {
+ foobar := []string{"foo", "bar"}
+ var s VS
+ s.f(1, "foo", "bar")
+ s.f(1, foobar...)
+ if vcount != 2 {
+ panic("s.f not called twice")
+ }
+
+ fn := VI.f
+ fn(s, 1, "foo", "bar")
+ fn(s, 1, foobar...)
+ if vcount != 4 {
+ panic("I.f not called twice")
+ }
+}
+
+// Multiple labels on same statement.
+func multipleLabels() {
+ var trace []int
+ i := 0
+one:
+two:
+ for ; i < 3; i++ {
+ trace = append(trace, i)
+ switch i {
+ case 0:
+ continue two
+ case 1:
+ i++
+ goto one
+ case 2:
+ break two
+ }
+ }
+ if x := fmt.Sprint(trace); x != "[0 1 2]" {
+ panic(x)
+ }
+}
+
+func init() {
+ multipleLabels()
+}
+
+func init() {
+ // Struct equivalence ignores blank fields.
+ type s struct{ x, _, z int }
+ s1 := s{x: 1, z: 3}
+ s2 := s{x: 1, z: 3}
+ if s1 != s2 {
+ panic("not equal")
+ }
+}
+
+func init() {
+ // A slice var can be compared to const []T nil.
+ var i interface{} = []string{"foo"}
+ var j interface{} = []string(nil)
+ if i.([]string) == nil {
+ panic("expected i non-nil")
+ }
+ if j.([]string) != nil {
+ panic("expected j nil")
+ }
+ // But two slices cannot be compared, even if one is nil.
+ defer func() {
+ r := fmt.Sprint(recover())
+ if r != "runtime error: comparing uncomparable type []string" {
+ panic("want panic from slice comparison, got " + r)
+ }
+ }()
+ _ = i == j // interface comparison recurses on types
+}
+
+func init() {
+ // Regression test for SSA renaming bug.
+ var ints []int
+ for _ = range "foo" {
+ var x int
+ x++
+ ints = append(ints, x)
+ }
+ if fmt.Sprint(ints) != "[1 1 1]" {
+ panic(ints)
+ }
+}
+
+// Regression test for issue 6949:
+// []byte("foo") is not a constant since it allocates memory.
+func init() {
+ var r string
+ for i, b := range "ABC" {
+ x := []byte("abc")
+ x[i] = byte(b)
+ r += string(x)
+ }
+ if r != "AbcaBcabC" {
+ panic(r)
+ }
+}
+
+// Test of 3-operand x[lo:hi:max] slice.
+func init() {
+ s := []int{0, 1, 2, 3}
+ lenCapLoHi := func(x []int) [4]int { return [4]int{len(x), cap(x), x[0], x[len(x)-1]} }
+ if got := lenCapLoHi(s[1:3]); got != [4]int{2, 3, 1, 2} {
+ panic(got)
+ }
+ if got := lenCapLoHi(s[1:3:3]); got != [4]int{2, 2, 1, 2} {
+ panic(got)
+ }
+ max := 3
+ if "a"[0] == 'a' {
+ max = 2 // max is non-constant, even in SSA form
+ }
+ if got := lenCapLoHi(s[1:2:max]); got != [4]int{1, 1, 1, 1} {
+ panic(got)
+ }
+}
+
+// Test that a nice error is issued by indirection wrappers.
+func init() {
+ var ptr *T
+ var i I = ptr
+
+ defer func() {
+ r := fmt.Sprint(recover())
+ // Exact error varies by toolchain:
+ if r != "runtime error: value method (main.T).f called using nil *main.T pointer" &&
+ r != "value method main.T.f called using nil *T pointer" {
+ panic("want panic from call with nil receiver, got " + r)
+ }
+ }()
+ i.f()
+ panic("unreachable")
+}
diff --git a/llgo/third_party/go.tools/go/ssa/interp/testdata/defer.go b/llgo/third_party/go.tools/go/ssa/interp/testdata/defer.go
new file mode 100644
index 0000000000000000000000000000000000000000..f5bae6c3f4e3f2e97d87b305240b0ef44b7fb9a0
--- /dev/null
+++ b/llgo/third_party/go.tools/go/ssa/interp/testdata/defer.go
@@ -0,0 +1,53 @@
+package main
+
+// Tests of defer. (Deferred recover() belongs in recover.go.)
+
+import "fmt"
+
+func deferMutatesResults(noArgReturn bool) (a, b int) {
+ defer func() {
+ if a != 1 || b != 2 {
+ panic(fmt.Sprint(a, b))
+ }
+ a, b = 3, 4
+ }()
+ if noArgReturn {
+ a, b = 1, 2
+ return
+ }
+ return 1, 2
+}
+
+func init() {
+ a, b := deferMutatesResults(true)
+ if a != 3 || b != 4 {
+ panic(fmt.Sprint(a, b))
+ }
+ a, b = deferMutatesResults(false)
+ if a != 3 || b != 4 {
+ panic(fmt.Sprint(a, b))
+ }
+}
+
+// We concatenate init blocks to make a single function, but we must
+// run defers at the end of each block, not the combined function.
+var deferCount = 0
+
+func init() {
+ deferCount = 1
+ defer func() {
+ deferCount++
+ }()
+ // defer runs HERE
+}
+
+func init() {
+ // Strictly speaking the spec says deferCount may be 0 or 2
+ // since the relative order of init blocks is unspecified.
+ if deferCount != 2 {
+ panic(deferCount) // defer call has not run!
+ }
+}
+
+func main() {
+}
diff --git a/llgo/third_party/go.tools/go/ssa/interp/testdata/fieldprom.go b/llgo/third_party/go.tools/go/ssa/interp/testdata/fieldprom.go
new file mode 100644
index 0000000000000000000000000000000000000000..fc276ddbf08de7c7e07837618857a6d74865f4a2
--- /dev/null
+++ b/llgo/third_party/go.tools/go/ssa/interp/testdata/fieldprom.go
@@ -0,0 +1,114 @@
+package main
+
+// Tests of field promotion logic.
+
+type A struct {
+ x int
+ y *int
+}
+
+type B struct {
+ p int
+ q *int
+}
+
+type C struct {
+ A
+ *B
+}
+
+type D struct {
+ a int
+ C
+}
+
+func assert(cond bool) {
+ if !cond {
+ panic("failed")
+ }
+}
+
+func f1(c C) {
+ assert(c.x == c.A.x)
+ assert(c.y == c.A.y)
+ assert(&c.x == &c.A.x)
+ assert(&c.y == &c.A.y)
+
+ assert(c.p == c.B.p)
+ assert(c.q == c.B.q)
+ assert(&c.p == &c.B.p)
+ assert(&c.q == &c.B.q)
+
+ c.x = 1
+ *c.y = 1
+ c.p = 1
+ *c.q = 1
+}
+
+func f2(c *C) {
+ assert(c.x == c.A.x)
+ assert(c.y == c.A.y)
+ assert(&c.x == &c.A.x)
+ assert(&c.y == &c.A.y)
+
+ assert(c.p == c.B.p)
+ assert(c.q == c.B.q)
+ assert(&c.p == &c.B.p)
+ assert(&c.q == &c.B.q)
+
+ c.x = 1
+ *c.y = 1
+ c.p = 1
+ *c.q = 1
+}
+
+func f3(d D) {
+ assert(d.x == d.C.A.x)
+ assert(d.y == d.C.A.y)
+ assert(&d.x == &d.C.A.x)
+ assert(&d.y == &d.C.A.y)
+
+ assert(d.p == d.C.B.p)
+ assert(d.q == d.C.B.q)
+ assert(&d.p == &d.C.B.p)
+ assert(&d.q == &d.C.B.q)
+
+ d.x = 1
+ *d.y = 1
+ d.p = 1
+ *d.q = 1
+}
+
+func f4(d *D) {
+ assert(d.x == d.C.A.x)
+ assert(d.y == d.C.A.y)
+ assert(&d.x == &d.C.A.x)
+ assert(&d.y == &d.C.A.y)
+
+ assert(d.p == d.C.B.p)
+ assert(d.q == d.C.B.q)
+ assert(&d.p == &d.C.B.p)
+ assert(&d.q == &d.C.B.q)
+
+ d.x = 1
+ *d.y = 1
+ d.p = 1
+ *d.q = 1
+}
+
+func main() {
+ y := 123
+ c := C{
+ A{x: 42, y: &y},
+ &B{p: 42, q: &y},
+ }
+
+ assert(&c.x == &c.A.x)
+
+ f1(c)
+ f2(&c)
+
+ d := D{C: c}
+ f3(d)
+ f4(&d)
+}
diff --git a/llgo/third_party/go.tools/go/ssa/interp/testdata/ifaceconv.go b/llgo/third_party/go.tools/go/ssa/interp/testdata/ifaceconv.go
new file mode 100644
index 0000000000000000000000000000000000000000..96fc105862440525afed1eaf96896cf972a1b7f4
--- /dev/null
+++ b/llgo/third_party/go.tools/go/ssa/interp/testdata/ifaceconv.go
@@ -0,0 +1,83 @@
+package main
+
+// Tests of interface conversions and type assertions.
+
+type I0 interface {
+}
+type I1 interface {
+ f()
+}
+type I2 interface {
+ f()
+ g()
+}
+
+type C0 struct{}
+type C1 struct{}
+
+func (C1) f() {}
+
+type C2 struct{}
+
+func (C2) f() {}
+func (C2) g() {}
+
+func main() {
+ var i0 I0
+ var i1 I1
+ var i2 I2
+
+ // Nil always causes a type assertion to fail, even to the
+ // same type.
+ if _, ok := i0.(I0); ok {
+ panic("nil i0.(I0) succeeded")
+ }
+ if _, ok := i1.(I1); ok {
+ panic("nil i1.(I1) succeeded")
+ }
+ if _, ok := i2.(I2); ok {
+ panic("nil i2.(I2) succeeded")
+ }
+
+ // Conversions can't fail, even with nil.
+ _ = I0(i0)
+
+ _ = I0(i1)
+ _ = I1(i1)
+
+ _ = I0(i2)
+ _ = I1(i2)
+ _ = I2(i2)
+
+ // Non-nil type assertions pass or fail based on the concrete type.
+ i1 = C1{}
+ if _, ok := i1.(I0); !ok {
+ panic("C1 i1.(I0) failed")
+ }
+ if _, ok := i1.(I1); !ok {
+ panic("C1 i1.(I1) failed")
+ }
+ if _, ok := i1.(I2); ok {
+ panic("C1 i1.(I2) succeeded")
+ }
+
+ i1 = C2{}
+ if _, ok := i1.(I0); !ok {
+ panic("C2 i1.(I0) failed")
+ }
+ if _, ok := i1.(I1); !ok {
+ panic("C2 i1.(I1) failed")
+ }
+ if _, ok := i1.(I2); !ok {
+ panic("C2 i1.(I2) failed")
+ }
+
+ // Conversions can't fail.
+ i1 = C1{}
+ if I0(i1) == nil {
+ panic("C1 I0(i1) was nil")
+ }
+ if I1(i1) == nil {
+ panic("C1 I1(i1) was nil")
+ }
+}
diff --git a/llgo/third_party/go.tools/go/ssa/interp/testdata/ifaceprom.go b/llgo/third_party/go.tools/go/ssa/interp/testdata/ifaceprom.go
new file mode 100644
index 0000000000000000000000000000000000000000..414dc7363637e1157725a77786914ef5b4649d9a
--- /dev/null
+++ b/llgo/third_party/go.tools/go/ssa/interp/testdata/ifaceprom.go
@@ -0,0 +1,58 @@
+package main
+
+// Test of promotion of methods of an interface embedded within a
+// struct. In particular, this test exercises that the correct
+// method is called.
+
+type I interface {
+ one() int
+ two() string
+}
+
+type S struct {
+ I
+}
+
+type impl struct{}
+
+func (impl) one() int {
+ return 1
+}
+
+func (impl) two() string {
+ return "two"
+}
+
+func main() {
+ var s S
+ s.I = impl{}
+ if one := s.I.one(); one != 1 {
+ panic(one)
+ }
+ if one := s.one(); one != 1 {
+ panic(one)
+ }
+ closOne := s.I.one
+ if one := closOne(); one != 1 {
+ panic(one)
+ }
+ closOne = s.one
+ if one := closOne(); one != 1 {
+ panic(one)
+ }
+
+ if two := s.I.two(); two != "two" {
+ panic(two)
+ }
+ if two := s.two(); two != "two" {
+ panic(two)
+ }
+ closTwo := s.I.two
+ if two := closTwo(); two != "two" {
+ panic(two)
+ }
+ closTwo = s.two
+ if two := closTwo(); two != "two" {
+ panic(two)
+ }
+}
diff --git a/llgo/third_party/go.tools/go/ssa/interp/testdata/initorder.go b/llgo/third_party/go.tools/go/ssa/interp/testdata/initorder.go
new file mode 100644
index 0000000000000000000000000000000000000000..0f26bed695558c4e68853a9967c6716d4f5e06a5
--- /dev/null
+++ b/llgo/third_party/go.tools/go/ssa/interp/testdata/initorder.go
@@ -0,0 +1,67 @@
+package main
+
+import "fmt"
+
+// Test of initialization order of package-level vars.
+
+var counter int
+
+func next() int {
+ c := counter
+ counter++
+ return c
+}
+
+func next2() (x int, y int) {
+ x = next()
+ y = next()
+ return
+}
+
+func makeOrder() int {
+ _, _, _, _ = f, b, d, e
+ return 0
+}
+
+func main() {
+ // Initialization constraints:
+ // - {f,b,c/d,e} < order (ref graph traversal)
+ // - order < {a} (lexical order)
+ // - b < c/d < e < f (lexical order)
+ // Solution: a b c/d e f
+ abcdef := [6]int{a, b, c, d, e, f}
+ if abcdef != [6]int{0, 1, 2, 3, 4, 5} {
+ panic(abcdef)
+ }
+}
+
+var order = makeOrder()
+
+var a, b = next(), next()
+var c, d = next2()
+var e, f = next(), next()
+
+// ------------------------------------------------------------------------
+
+var order2 []string
+
+func create(x int, name string) int {
+ order2 = append(order2, name)
+ return x
+}
+
+var C = create(B+1, "C")
+var A, B = create(1, "A"), create(2, "B")
+
+// Initialization order of package-level value specs.
+func init() {
+ x := fmt.Sprint(order2)
+ // Result varies by toolchain. This is a spec bug.
+ if x != "[B C A]" && // gc
+ x != "[A B C]" { // go/types
+ panic(x)
+ }
+ if C != 3 {
+ panic(C)
+ }
+}
diff --git a/llgo/third_party/go.tools/go/ssa/interp/testdata/methprom.go b/llgo/third_party/go.tools/go/ssa/interp/testdata/methprom.go
new file mode 100644
index 0000000000000000000000000000000000000000..e8e384c311721aea60c4ebb488260f6f640410cf
--- /dev/null
+++ b/llgo/third_party/go.tools/go/ssa/interp/testdata/methprom.go
@@ -0,0 +1,93 @@
+package main
+
+// Tests of method promotion logic.
+
+type A struct{ magic int }
+
+func (a A) x() {
+ if a.magic != 1 {
+ panic(a.magic)
+ }
+}
+func (a *A) y() *A {
+ return a
+}
+
+type B struct{ magic int }
+
+func (b B) p() {
+ if b.magic != 2 {
+ panic(b.magic)
+ }
+}
+func (b *B) q() {
+ if b != theC.B {
+ panic("oops")
+ }
+}
+
+type I interface {
+ f()
+}
+
+type impl struct{ magic int }
+
+func (i impl) f() {
+ if i.magic != 3 {
+ panic("oops")
+ }
+}
+
+type C struct {
+ A
+ *B
+ I
+}
+
+func assert(cond bool) {
+ if !cond {
+ panic("failed")
+ }
+}
+
+var theC = C{
+ A: A{1},
+ B: &B{2},
+ I: impl{3},
+}
+
+func addr() *C {
+ return &theC
+}
+
+func value() C {
+ return theC
+}
+
+func main() {
+ // address
+ addr().x()
+ if addr().y() != &theC.A {
+ panic("oops")
+ }
+ addr().p()
+ addr().q()
+ addr().f()
+
+ // addressable value
+ var c C = value()
+ c.x()
+ if c.y() != &c.A {
+ panic("oops")
+ }
+ c.p()
+ c.q()
+ c.f()
+
+ // non-addressable value
+ value().x()
+ // value().y() // not in method set
+ value().p()
+ value().q()
+ value().f()
+}
diff --git a/llgo/third_party/go.tools/go/ssa/interp/testdata/mrvchain.go b/llgo/third_party/go.tools/go/ssa/interp/testdata/mrvchain.go
new file mode 100644
index 0000000000000000000000000000000000000000..70dfd0273265772374a2790f283358ba4bd8d7be
--- /dev/null
+++ b/llgo/third_party/go.tools/go/ssa/interp/testdata/mrvchain.go
@@ -0,0 +1,75 @@
+// Tests of call chaining f(g()) when g has multiple return values (MRVs).
+// See https://code.google.com/p/go/issues/detail?id=4573.
+
+package main
+
+func assert(actual, expected int) {
+ if actual != expected {
+ panic(actual)
+ }
+}
+
+func g() (int, int) {
+ return 5, 7
+}
+
+func g2() (float64, float64) {
+ return 5, 7
+}
+
+func f1v(x int, v ...int) {
+ assert(x, 5)
+ assert(v[0], 7)
+}
+
+func f2(x, y int) {
+ assert(x, 5)
+ assert(y, 7)
+}
+
+func f2v(x, y int, v ...int) {
+ assert(x, 5)
+ assert(y, 7)
+ assert(len(v), 0)
+}
+
+func complexArgs() (float64, float64) {
+ return 5, 7
+}
+
+func appendArgs() ([]string, string) {
+ return []string{"foo"}, "bar"
+}
+
+func h() (i interface{}, ok bool) {
+ m := map[int]string{1: "hi"}
+ i, ok = m[1] // string->interface{} conversion within multi-valued expression
+ return
+}
+
+func h2() (i interface{}, ok bool) {
+ ch := make(chan string, 1)
+ ch <- "hi"
+ i, ok = <-ch // string->interface{} conversion within multi-valued expression
+ return
+}
+
+func main() {
+ f1v(g())
+ f2(g())
+ f2v(g())
+ if c := complex(complexArgs()); c != 5+7i {
+ panic(c)
+ }
+ if s := append(appendArgs()); len(s) != 2 || s[0] != "foo" || s[1] != "bar" {
+ panic(s)
+ }
+ i, ok := h()
+ if !ok || i.(string) != "hi" {
+ panic(i)
+ }
+ i, ok = h2()
+ if !ok || i.(string) != "hi" {
+ panic(i)
+ }
+}
diff --git a/llgo/third_party/go.tools/go/ssa/interp/testdata/range.go b/llgo/third_party/go.tools/go/ssa/interp/testdata/range.go
new file mode 100644
index 0000000000000000000000000000000000000000..da8a421e629f3f2b536797c168fb975f25aab256
--- /dev/null
+++ b/llgo/third_party/go.tools/go/ssa/interp/testdata/range.go
@@ -0,0 +1,55 @@
+package main
+
+// Tests of range loops.
+
+import "fmt"
+
+// Range over string.
+func init() {
+ if x := len("Hello, 世界"); x != 13 { // bytes
+ panic(x)
+ }
+ var indices []int
+ var runes []rune
+ for i, r := range "Hello, 世界" {
+ runes = append(runes, r)
+ indices = append(indices, i)
+ }
+ if x := fmt.Sprint(runes); x != "[72 101 108 108 111 44 32 19990 30028]" {
+ panic(x)
+ }
+ if x := fmt.Sprint(indices); x != "[0 1 2 3 4 5 6 7 10]" {
+ panic(x)
+ }
+ s := ""
+ for _, r := range runes {
+ s = fmt.Sprintf("%s%c", s, r)
+ }
+ if s != "Hello, 世界" {
+ panic(s)
+ }
+
+ var x int
+ for range "Hello, 世界" {
+ x++
+ }
+ if x != len(indices) {
+ panic(x)
+ }
+}
+
+// Regression test for range of pointer to named array type.
+func init() {
+ type intarr [3]int
+ ia := intarr{1, 2, 3}
+ var count int
+ for _, x := range &ia {
+ count += x
+ }
+ if count != 6 {
+ panic(count)
+ }
+}
+
+func main() {
+}
diff --git a/llgo/third_party/go.tools/go/ssa/interp/testdata/recover.go b/llgo/third_party/go.tools/go/ssa/interp/testdata/recover.go
new file mode 100644
index 0000000000000000000000000000000000000000..b5600522633a8b5fe4ee0720a7e4b2d47329b6ca
--- /dev/null
+++ b/llgo/third_party/go.tools/go/ssa/interp/testdata/recover.go
@@ -0,0 +1,34 @@
+package main
+
+// Tests of panic/recover.
+
+import "fmt"
+
+func fortyTwo() (r int) {
+ r = 42
+ // The next two statements simulate a 'return' statement.
+ defer func() { recover() }()
+ panic(nil)
+}
+
+func zero() int {
+ defer func() { recover() }()
+ panic(1)
+}
+
+func zeroEmpty() (int, string) {
+ defer func() { recover() }()
+ panic(1)
+}
+
+func main() {
+ if r := fortyTwo(); r != 42 {
+ panic(r)
+ }
+ if r := zero(); r != 0 {
+ panic(r)
+ }
+ if r, s := zeroEmpty(); r != 0 || s != "" {
+ panic(fmt.Sprint(r, s))
+ }
+}
diff --git a/llgo/third_party/go.tools/go/ssa/interp/testdata/static.go b/llgo/third_party/go.tools/go/ssa/interp/testdata/static.go
new file mode 100644
index 0000000000000000000000000000000000000000..b115513c6317cee9fe888d708f4c23071b522a7f
--- /dev/null
+++ b/llgo/third_party/go.tools/go/ssa/interp/testdata/static.go
@@ -0,0 +1,58 @@
+package main
+
+// Static tests of SSA builder (via the sanity checker).
+// Dynamic semantics are not exercised.
+
+func init() {
+ // Regression test for issue 6806.
+ ch := make(chan int)
+ select {
+ case n, _ := <-ch:
+ _ = n
+ default:
+ // The default case disables the simplification of
+ // select to a simple receive statement.
+ }
+
+ // value,ok-form receive where TypeOf(ok) is a named boolean.
+ type mybool bool
+ var x int
+ var y mybool
+ select {
+ case x, y = <-ch:
+ default:
+ // The default case disables the simplification of
+ // select to a simple receive statement.
+ }
+ _ = x
+ _ = y
+}
+
+var a int
+
+// Regression test for issue 7840 (covered by SSA sanity checker).
+func bug7840() bool {
+ // This creates a single-predecessor block with a φ-node.
+ return false && a == 0 && a == 0
+}
+
+// A blocking select (sans "default:") cannot fall through.
+// Regression test for issue 7022.
+func bug7022() int {
+ var c1, c2 chan int
+ select {
+ case <-c1:
+ return 123
+ case <-c2:
+ return 456
+ }
+}
+
+// Parens should not prevent intrinsic treatment of built-ins.
+// (Regression test for a crash.)
+func init() {
+ _ = (new)(int)
+ _ = (make)([]int, 0)
+}
+
+func main() {}
diff --git a/llgo/third_party/go.tools/go/ssa/interp/value.go b/llgo/third_party/go.tools/go/ssa/interp/value.go
new file mode 100644
index 0000000000000000000000000000000000000000..67717edc8036f26cf36d30feff6d18dd9a774db3
--- /dev/null
+++ b/llgo/third_party/go.tools/go/ssa/interp/value.go
@@ -0,0 +1,487 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package interp
+
+// Values
+//
+// All interpreter values are "boxed" in the empty interface, value.
+// The range of possible dynamic types within value are:
+//
+// - bool
+// - numbers (all built-in int/float/complex types are distinguished)
+// - string
+// - map[value]value --- maps for which usesBuiltinMap(keyType)
+// *hashmap --- maps for which !usesBuiltinMap(keyType)
+// - chan value
+// - []value --- slices
+// - iface --- interfaces.
+// - structure --- structs. Fields are ordered and accessed by numeric indices.
+// - array --- arrays.
+// - *value --- pointers. Careful: *value is a distinct type from *array etc.
+// - *ssa.Function \
+// *ssa.Builtin } --- functions. A nil 'func' is always of type *ssa.Function.
+// *closure /
+// - tuple --- as returned by Return, Next, "value,ok" modes, etc.
+// - iter --- iterators from 'range' over map or string.
+// - bad --- a poison pill for locals that have gone out of scope.
+// - rtype -- the interpreter's concrete implementation of reflect.Type
+//
+// Note that nil is not on this list.
+//
+// Pay close attention to whether or not the dynamic type is a pointer.
+// The compiler cannot help you since value is an empty interface.
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "reflect"
+ "strings"
+ "sync"
+ "unsafe"
+
+ "llvm.org/llgo/third_party/go.tools/go/ssa"
+ "llvm.org/llgo/third_party/go.tools/go/types"
+ "llvm.org/llgo/third_party/go.tools/go/types/typeutil"
+)
+
+type value interface{}
+
+type tuple []value
+
+type array []value
+
+type iface struct {
+ t types.Type // never an "untyped" type
+ v value
+}
+
+type structure []value
+
+// For map, array, *array, slice, string or channel.
+type iter interface {
+ // next returns a Tuple (key, value, ok).
+ // key and value are unaliased, e.g. copies of the sequence element.
+ next() tuple
+}
+
+type closure struct {
+ Fn *ssa.Function
+ Env []value
+}
+
+type bad struct{}
+
+type rtype struct {
+ t types.Type
+}
+
+// Hash functions and equivalence relation:
+
+// hashString computes the FNV hash of s.
+func hashString(s string) int {
+ var h uint32
+ for i := 0; i < len(s); i++ {
+ h ^= uint32(s[i])
+ h *= 16777619
+ }
+ return int(h)
+}
+
+var (
+ mu sync.Mutex
+ hasher = typeutil.MakeHasher()
+)
+
+// hashType returns a hash for t such that
+// types.Identical(x, y) => hashType(x) == hashType(y).
+func hashType(t types.Type) int {
+ mu.Lock()
+ h := int(hasher.Hash(t))
+ mu.Unlock()
+ return h
+}
+
+// usesBuiltinMap returns true if the built-in hash function and
+// equivalence relation for type t are consistent with those of the
+// interpreter's representation of type t. Such types are: all basic
+// types (bool, numbers, string), pointers and channels.
+//
+// usesBuiltinMap returns false for types that require a custom map
+// implementation: interfaces, arrays and structs.
+//
+// Panic ensues if t is an invalid map key type: function, map or slice.
+func usesBuiltinMap(t types.Type) bool {
+ switch t := t.(type) {
+ case *types.Basic, *types.Chan, *types.Pointer:
+ return true
+ case *types.Named:
+ return usesBuiltinMap(t.Underlying())
+ case *types.Interface, *types.Array, *types.Struct:
+ return false
+ }
+ panic(fmt.Sprintf("invalid map key type: %T", t))
+}
+
+func (x array) eq(t types.Type, _y interface{}) bool {
+ y := _y.(array)
+ tElt := t.Underlying().(*types.Array).Elem()
+ for i, xi := range x {
+ if !equals(tElt, xi, y[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+func (x array) hash(t types.Type) int {
+ h := 0
+ tElt := t.Underlying().(*types.Array).Elem()
+ for _, xi := range x {
+ h += hash(tElt, xi)
+ }
+ return h
+}
+
+func (x structure) eq(t types.Type, _y interface{}) bool {
+ y := _y.(structure)
+ tStruct := t.Underlying().(*types.Struct)
+ for i, n := 0, tStruct.NumFields(); i < n; i++ {
+ if f := tStruct.Field(i); !f.Anonymous() {
+ if !equals(f.Type(), x[i], y[i]) {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+func (x structure) hash(t types.Type) int {
+ tStruct := t.Underlying().(*types.Struct)
+ h := 0
+ for i, n := 0, tStruct.NumFields(); i < n; i++ {
+ if f := tStruct.Field(i); !f.Anonymous() {
+ h += hash(f.Type(), x[i])
+ }
+ }
+ return h
+}
+
+// nil-tolerant variant of types.Identical.
+func sameType(x, y types.Type) bool {
+ if x == nil {
+ return y == nil
+ }
+ return y != nil && types.Identical(x, y)
+}
+
+func (x iface) eq(t types.Type, _y interface{}) bool {
+ y := _y.(iface)
+ return sameType(x.t, y.t) && (x.t == nil || equals(x.t, x.v, y.v))
+}
+
+func (x iface) hash(_ types.Type) int {
+ return hashType(x.t)*8581 + hash(x.t, x.v)
+}
+
+func (x rtype) hash(_ types.Type) int {
+ return hashType(x.t)
+}
+
+func (x rtype) eq(_ types.Type, y interface{}) bool {
+ return types.Identical(x.t, y.(rtype).t)
+}
+
+// equals returns true iff x and y are equal according to Go's
+// linguistic equivalence relation for type t.
+// In a well-typed program, the dynamic types of x and y are
+// guaranteed equal.
+func equals(t types.Type, x, y value) bool {
+ switch x := x.(type) {
+ case bool:
+ return x == y.(bool)
+ case int:
+ return x == y.(int)
+ case int8:
+ return x == y.(int8)
+ case int16:
+ return x == y.(int16)
+ case int32:
+ return x == y.(int32)
+ case int64:
+ return x == y.(int64)
+ case uint:
+ return x == y.(uint)
+ case uint8:
+ return x == y.(uint8)
+ case uint16:
+ return x == y.(uint16)
+ case uint32:
+ return x == y.(uint32)
+ case uint64:
+ return x == y.(uint64)
+ case uintptr:
+ return x == y.(uintptr)
+ case float32:
+ return x == y.(float32)
+ case float64:
+ return x == y.(float64)
+ case complex64:
+ return x == y.(complex64)
+ case complex128:
+ return x == y.(complex128)
+ case string:
+ return x == y.(string)
+ case *value:
+ return x == y.(*value)
+ case chan value:
+ return x == y.(chan value)
+ case structure:
+ return x.eq(t, y)
+ case array:
+ return x.eq(t, y)
+ case iface:
+ return x.eq(t, y)
+ case rtype:
+ return x.eq(t, y)
+ }
+
+ // Since map, func and slice don't support comparison, this
+ // case is only reachable if one of x or y is literally nil
+ // (handled in eqnil) or via interface{} values.
+ panic(fmt.Sprintf("comparing uncomparable type %s", t))
+}
+
+// Returns an integer hash of x such that equals(x, y) => hash(x) == hash(y).
+func hash(t types.Type, x value) int {
+ switch x := x.(type) {
+ case bool:
+ if x {
+ return 1
+ }
+ return 0
+ case int:
+ return x
+ case int8:
+ return int(x)
+ case int16:
+ return int(x)
+ case int32:
+ return int(x)
+ case int64:
+ return int(x)
+ case uint:
+ return int(x)
+ case uint8:
+ return int(x)
+ case uint16:
+ return int(x)
+ case uint32:
+ return int(x)
+ case uint64:
+ return int(x)
+ case uintptr:
+ return int(x)
+ case float32:
+ return int(x)
+ case float64:
+ return int(x)
+ case complex64:
+ return int(real(x))
+ case complex128:
+ return int(real(x))
+ case string:
+ return hashString(x)
+ case *value:
+ return int(uintptr(unsafe.Pointer(x)))
+ case chan value:
+ return int(uintptr(reflect.ValueOf(x).Pointer()))
+ case structure:
+ return x.hash(t)
+ case array:
+ return x.hash(t)
+ case iface:
+ return x.hash(t)
+ case rtype:
+ return x.hash(t)
+ }
+ panic(fmt.Sprintf("%T is unhashable", x))
+}
+
+// copyVal returns a copy of value v.
+// TODO(adonovan): add tests of aliasing and mutation.
+func copyVal(v value) value {
+ if v == nil {
+ panic("copyVal(nil)")
+ }
+ switch v := v.(type) {
+ case bool, int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, uintptr, float32, float64, complex64, complex128, string, unsafe.Pointer:
+ return v
+ case map[value]value:
+ return v
+ case *hashmap:
+ return v
+ case chan value:
+ return v
+ case *value:
+ return v
+ case *ssa.Function, *ssa.Builtin, *closure:
+ return v
+ case iface:
+ return v
+ case []value:
+ return v
+ case structure:
+ a := make(structure, len(v))
+ copy(a, v)
+ return a
+ case array:
+ a := make(array, len(v))
+ copy(a, v)
+ return a
+ case tuple:
+ break
+ case rtype:
+ return v
+ }
+ panic(fmt.Sprintf("cannot copy %T", v))
+}
+
+// Prints in the style of built-in println.
+// (More or less; in gc println is actually a compiler intrinsic and
+// can distinguish println(1) from println(interface{}(1)).)
+func writeValue(buf *bytes.Buffer, v value) {
+ switch v := v.(type) {
+ case nil, bool, int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, uintptr, float32, float64, complex64, complex128, string:
+ fmt.Fprintf(buf, "%v", v)
+
+ case map[value]value:
+ buf.WriteString("map[")
+ sep := ""
+ for k, e := range v {
+ buf.WriteString(sep)
+ sep = " "
+ writeValue(buf, k)
+ buf.WriteString(":")
+ writeValue(buf, e)
+ }
+ buf.WriteString("]")
+
+ case *hashmap:
+ buf.WriteString("map[")
+ sep := " "
+ for _, e := range v.table {
+ for e != nil {
+ buf.WriteString(sep)
+ sep = " "
+ writeValue(buf, e.key)
+ buf.WriteString(":")
+ writeValue(buf, e.value)
+ e = e.next
+ }
+ }
+ buf.WriteString("]")
+
+ case chan value:
+ fmt.Fprintf(buf, "%v", v) // (an address)
+
+ case *value:
+ if v == nil {
+ buf.WriteString("&lt;nil&gt;")
+ } else {
+ fmt.Fprintf(buf, "%p", v)
+ }
+
+ case iface:
+ fmt.Fprintf(buf, "(%s, ", v.t)
+ writeValue(buf, v.v)
+ buf.WriteString(")")
+
+ case structure:
+ buf.WriteString("{")
+ for i, e := range v {
+ if i > 0 {
+ buf.WriteString(" ")
+ }
+ writeValue(buf, e)
+ }
+ buf.WriteString("}")
+
+ case array:
+ buf.WriteString("[")
+ for i, e := range v {
+ if i > 0 {
+ buf.WriteString(" ")
+ }
+ writeValue(buf, e)
+ }
+ buf.WriteString("]")
+
+ case []value:
+ buf.WriteString("[")
+ for i, e := range v {
+ if i > 0 {
+ buf.WriteString(" ")
+ }
+ writeValue(buf, e)
+ }
+ buf.WriteString("]")
+
+ case *ssa.Function, *ssa.Builtin, *closure:
+ fmt.Fprintf(buf, "%p", v) // (an address)
+
+ case rtype:
+ buf.WriteString(v.t.String())
+
+ case tuple:
+ // Unreachable in well-formed Go programs
+ buf.WriteString("(")
+ for i, e := range v {
+ if i > 0 {
+ buf.WriteString(", ")
+ }
+ writeValue(buf, e)
+ }
+ buf.WriteString(")")
+
+ default:
+ fmt.Fprintf(buf, "<%T>", v)
+ }
+}
+
+// Implements printing of Go values in the style of built-in println.
+func toString(v value) string {
+ var b bytes.Buffer
+ writeValue(&b, v)
+ return b.String()
+}
+
+// ------------------------------------------------------------------------
+// Iterators
+
+type stringIter struct {
+ *strings.Reader
+ i int
+}
+
+func (it *stringIter) next() tuple {
+ okv := make(tuple, 3)
+ ch, n, err := it.ReadRune()
+ ok := err != io.EOF
+ okv[0] = ok
+ if ok {
+ okv[1] = it.i
+ okv[2] = ch
+ }
+ it.i += n
+ return okv
+}
+
+type mapIter chan [2]value
+
+func (it mapIter) next() tuple {
+ kv, ok := <-it
+ return tuple{ok, kv[0], kv[1]}
+}
diff --git a/llgo/third_party/go.tools/go/ssa/lift.go b/llgo/third_party/go.tools/go/ssa/lift.go
new file mode 100644
index 0000000000000000000000000000000000000000..85f162d15c9becdaab7e8ed729050af1808bfafa
--- /dev/null
+++ b/llgo/third_party/go.tools/go/ssa/lift.go
@@ -0,0 +1,599 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// This file defines the lifting pass which tries to "lift" Alloc
+// cells (new/local variables) into SSA registers, replacing loads
+// with the dominating stored value, eliminating loads and stores, and
+// inserting φ-nodes as needed.
+
+// Cited papers and resources:
+//
+// Ron Cytron et al. 1991. Efficiently computing SSA form...
+// http://doi.acm.org/10.1145/115372.115320
+//
+// Cooper, Harvey, Kennedy. 2001. A Simple, Fast Dominance Algorithm.
+// Software Practice and Experience 2001, 4:1-10.
+// http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
+//
+// Daniel Berlin, llvmdev mailing list, 2012.
+// http://lists.cs.uiuc.edu/pipermail/llvmdev/2012-January/046638.html
+// (Be sure to expand the whole thread.)
+
+// TODO(adonovan): opt: there are many optimizations worth evaluating, and
+// the conventional wisdom for SSA construction is that a simple
+// algorithm well engineered often beats those of better asymptotic
+// complexity on all but the most egregious inputs.
+//
+// Danny Berlin suggests that the Cooper et al. algorithm for
+// computing the dominance frontier is superior to Cytron et al.
+// Furthermore he recommends that rather than computing the DF for the
+// whole function then renaming all alloc cells, it may be cheaper to
+// compute the DF for each alloc cell separately and throw it away.
+//
+// Consider exploiting liveness information to avoid creating dead
+// φ-nodes which we then immediately remove.
+//
+// Integrate lifting with scalar replacement of aggregates (SRA) since
+// the two are synergistic.
+//
+// Also see many other "TODO: opt" suggestions in the code.
+
+import (
+ "fmt"
+ "go/token"
+ "math/big"
+ "os"
+
+ "llvm.org/llgo/third_party/go.tools/go/types"
+)
+
+// If true, perform sanity checking and show diagnostic information at
+// each step of lifting. Very verbose.
+const debugLifting = false
+
+// domFrontier maps each block to the set of blocks in its dominance
+// frontier. The outer slice is conceptually a map keyed by
+// Block.Index. The inner slice is conceptually a set, possibly
+// containing duplicates.
+//
+// TODO(adonovan): opt: measure impact of dups; consider a packed bit
+// representation, e.g. big.Int, and bitwise parallel operations for
+// the union step in the Children loop.
+//
+// domFrontier's methods mutate the slice's elements but not its
+// length, so their receivers needn't be pointers.
+//
+type domFrontier [][]*BasicBlock
+
+func (df domFrontier) add(u, v *BasicBlock) {
+ p := &df[u.Index]
+ *p = append(*p, v)
+}
+
+// build builds the dominance frontier df for the dominator (sub)tree
+// rooted at u, using the Cytron et al. algorithm.
+//
+// TODO(adonovan): opt: consider Berlin approach, computing pruned SSA
+// by pruning the entire IDF computation, rather than merely pruning
+// the DF -> IDF step.
+func (df domFrontier) build(u *BasicBlock) {
+ // Encounter each node u in postorder of dom tree.
+ for _, child := range u.dom.children {
+ df.build(child)
+ }
+ for _, vb := range u.Succs {
+ if v := vb.dom; v.idom != u {
+ df.add(u, vb)
+ }
+ }
+ for _, w := range u.dom.children {
+ for _, vb := range df[w.Index] {
+ // TODO(adonovan): opt: use word-parallel bitwise union.
+ if v := vb.dom; v.idom != u {
+ df.add(u, vb)
+ }
+ }
+ }
+}
+
+func buildDomFrontier(fn *Function) domFrontier {
+ df := make(domFrontier, len(fn.Blocks))
+ df.build(fn.Blocks[0])
+ if fn.Recover != nil {
+ df.build(fn.Recover)
+ }
+ return df
+}
+
+func removeInstr(refs []Instruction, instr Instruction) []Instruction {
+ i := 0
+ for _, ref := range refs {
+ if ref == instr {
+ continue
+ }
+ refs[i] = ref
+ i++
+ }
+ for j := i; j != len(refs); j++ {
+ refs[j] = nil // aid GC
+ }
+ return refs[:i]
+}
+
+// lift attempts to replace local and new Allocs accessed only with
+// load/store by SSA registers, inserting φ-nodes where necessary.
+// The result is a program in classical pruned SSA form.
+//
+// Preconditions:
+// - fn has no dead blocks (blockopt has run).
+// - Def/use info (Operands and Referrers) is up-to-date.
+// - The dominator tree is up-to-date.
+//
+func lift(fn *Function) {
+ // TODO(adonovan): opt: lots of little optimizations may be
+ // worthwhile here, especially if they cause us to avoid
+ // buildDomFrontier. For example:
+ //
+ // - Alloc never loaded? Eliminate.
+ // - Alloc never stored? Replace all loads with a zero constant.
+ // - Alloc stored once? Replace loads with dominating store;
+ // don't forget that an Alloc is itself an effective store
+ // of zero.
+ // - Alloc used only within a single block?
+ // Use degenerate algorithm avoiding φ-nodes.
+ // - Consider synergy with scalar replacement of aggregates (SRA).
+ // e.g. *(&x.f) where x is an Alloc.
+ // Perhaps we'd get better results if we generated this as x.f
+ // i.e. Field(x, .f) instead of Load(FieldIndex(x, .f)).
+ // Unclear.
+ //
+ // But we will start with the simplest correct code.
+ df := buildDomFrontier(fn)
+
+ if debugLifting {
+ title := false
+ for i, blocks := range df {
+ if blocks != nil {
+ if !title {
+ fmt.Fprintf(os.Stderr, "Dominance frontier of %s:\n", fn)
+ title = true
+ }
+ fmt.Fprintf(os.Stderr, "\t%s: %s\n", fn.Blocks[i], blocks)
+ }
+ }
+ }
+
+ newPhis := make(newPhiMap)
+
+ // During this pass we will replace some BasicBlock.Instrs
+ // (allocs, loads and stores) with nil, keeping a count in
+ // BasicBlock.gaps. At the end we will reset Instrs to the
+ // concatenation of all non-dead newPhis and non-nil Instrs
+ // for the block, reusing the original array if space permits.
+
+ // While we're here, we also eliminate 'rundefers'
+ // instructions in functions that contain no 'defer'
+ // instructions.
+ usesDefer := false
+
+ // Determine which allocs we can lift and number them densely.
+ // The renaming phase uses this numbering for compact maps.
+ numAllocs := 0
+ for _, b := range fn.Blocks {
+ b.gaps = 0
+ b.rundefers = 0
+ for _, instr := range b.Instrs {
+ switch instr := instr.(type) {
+ case *Alloc:
+ index := -1
+ if liftAlloc(df, instr, newPhis) {
+ index = numAllocs
+ numAllocs++
+ }
+ instr.index = index
+ case *Defer:
+ usesDefer = true
+ case *RunDefers:
+ b.rundefers++
+ }
+ }
+ }
+
+ // renaming maps an alloc (keyed by index) to its replacement
+ // value. Initially the renaming contains nil, signifying the
+ // zero constant of the appropriate type; we construct the
+ // Const lazily at most once on each path through the domtree.
+ // TODO(adonovan): opt: cache per-function not per subtree.
+ renaming := make([]Value, numAllocs)
+
+ // Renaming.
+ rename(fn.Blocks[0], renaming, newPhis)
+
+ // Eliminate dead new phis, then prepend the live ones to each block.
+ for _, b := range fn.Blocks {
+
+ // Compress the newPhis slice to eliminate unused phis.
+ // TODO(adonovan): opt: compute liveness to avoid
+ // placing phis in blocks for which the alloc cell is
+ // not live.
+ nps := newPhis[b]
+ j := 0
+ for _, np := range nps {
+ if !phiIsLive(np.phi) {
+ // discard it, first removing it from referrers
+ for _, newval := range np.phi.Edges {
+ if refs := newval.Referrers(); refs != nil {
+ *refs = removeInstr(*refs, np.phi)
+ }
+ }
+ continue
+ }
+ nps[j] = np
+ j++
+ }
+ nps = nps[:j]
+
+ rundefersToKill := b.rundefers
+ if usesDefer {
+ rundefersToKill = 0
+ }
+
+ if j+b.gaps+rundefersToKill == 0 {
+ continue // fast path: no new phis or gaps
+ }
+
+ // Compact nps + non-nil Instrs into a new slice.
+ // TODO(adonovan): opt: compact in situ if there is
+ // sufficient space or slack in the slice.
+ dst := make([]Instruction, len(b.Instrs)+j-b.gaps-rundefersToKill)
+ for i, np := range nps {
+ dst[i] = np.phi
+ }
+ for _, instr := range b.Instrs {
+ if instr == nil {
+ continue
+ }
+ if !usesDefer {
+ if _, ok := instr.(*RunDefers); ok {
+ continue
+ }
+ }
+ dst[j] = instr
+ j++
+ }
+ for i, np := range nps {
+ dst[i] = np.phi
+ }
+ b.Instrs = dst
+ }
+
+ // Remove any fn.Locals that were lifted.
+ j := 0
+ for _, l := range fn.Locals {
+ if l.index < 0 {
+ fn.Locals[j] = l
+ j++
+ }
+ }
+ // Nil out fn.Locals[j:] to aid GC.
+ for i := j; i < len(fn.Locals); i++ {
+ fn.Locals[i] = nil
+ }
+ fn.Locals = fn.Locals[:j]
+}
+
+func phiIsLive(phi *Phi) bool {
+ for _, instr := range *phi.Referrers() {
+ if instr == phi {
+ continue // self-refs don't count
+ }
+ if _, ok := instr.(*DebugRef); ok {
+ continue // debug refs don't count
+ }
+ return true
+ }
+ return false
+}
+
+type blockSet struct{ big.Int } // (inherit methods from Int)
+
+// add adds b to the set and returns true if the set changed.
+func (s *blockSet) add(b *BasicBlock) bool {
+ i := b.Index
+ if s.Bit(i) != 0 {
+ return false
+ }
+ s.SetBit(&s.Int, i, 1)
+ return true
+}
+
+// take removes an arbitrary element from a set s and
+// returns its index, or returns -1 if empty.
+func (s *blockSet) take() int {
+ l := s.BitLen()
+ for i := 0; i < l; i++ {
+ if s.Bit(i) == 1 {
+ s.SetBit(&s.Int, i, 0)
+ return i
+ }
+ }
+ return -1
+}
+
+// newPhi is a pair of a newly introduced φ-node and the lifted Alloc
+// it replaces.
+type newPhi struct {
+ phi *Phi
+ alloc *Alloc
+}
+
+// newPhiMap records for each basic block, the set of newPhis that
+// must be prepended to the block.
+type newPhiMap map[*BasicBlock][]newPhi
+
+// liftAlloc determines whether alloc can be lifted into registers,
+// and if so, it populates newPhis with all the φ-nodes it may require
+// and returns true.
+//
+func liftAlloc(df domFrontier, alloc *Alloc, newPhis newPhiMap) bool {
+ // Don't lift aggregates into registers, because we don't have
+ // a way to express their zero-constants.
+ switch deref(alloc.Type()).Underlying().(type) {
+ case *types.Array, *types.Struct:
+ return false
+ }
+
+ // Don't lift named return values in functions that defer
+ // calls that may recover from panic.
+ if fn := alloc.Parent(); fn.Recover != nil {
+ for _, nr := range fn.namedResults {
+ if nr == alloc {
+ return false
+ }
+ }
+ }
+
+ // Compute defblocks, the set of blocks containing a
+ // definition of the alloc cell.
+ var defblocks blockSet
+ for _, instr := range *alloc.Referrers() {
+ // Bail out if we discover the alloc is not liftable;
+ // the only operations permitted to use the alloc are
+ // loads/stores into the cell, and DebugRef.
+ switch instr := instr.(type) {
+ case *Store:
+ if instr.Val == alloc {
+ return false // address used as value
+ }
+ if instr.Addr != alloc {
+ panic("Alloc.Referrers is inconsistent")
+ }
+ defblocks.add(instr.Block())
+ case *UnOp:
+ if instr.Op != token.MUL {
+ return false // not a load
+ }
+ if instr.X != alloc {
+ panic("Alloc.Referrers is inconsistent")
+ }
+ case *DebugRef:
+ // ok
+ default:
+ return false // some other instruction
+ }
+ }
+ // The Alloc itself counts as a (zero) definition of the cell.
+ defblocks.add(alloc.Block())
+
+ if debugLifting {
+ fmt.Fprintln(os.Stderr, "\tlifting ", alloc, alloc.Name())
+ }
+
+ fn := alloc.Parent()
+
+ // Φ-insertion.
+ //
+ // What follows is the body of the main loop of the insert-φ
+ // function described by Cytron et al, but instead of using
+ // counter tricks, we just reset the 'hasAlready' and 'work'
+ // sets each iteration. These are bitmaps so it's pretty cheap.
+ //
+ // TODO(adonovan): opt: recycle slice storage for W,
+ // hasAlready, defBlocks across liftAlloc calls.
+ var hasAlready blockSet
+
+ // Initialize W and work to defblocks.
+ var work blockSet = defblocks // blocks seen
+ var W blockSet // blocks to do
+ W.Set(&defblocks.Int)
+
+ // Traverse iterated dominance frontier, inserting φ-nodes.
+ for i := W.take(); i != -1; i = W.take() {
+ u := fn.Blocks[i]
+ for _, v := range df[u.Index] {
+ if hasAlready.add(v) {
+ // Create φ-node.
+ // It will be prepended to v.Instrs later, if needed.
+ phi := &Phi{
+ Edges: make([]Value, len(v.Preds)),
+ Comment: alloc.Comment,
+ }
+ phi.pos = alloc.Pos()
+ phi.setType(deref(alloc.Type()))
+ phi.block = v
+ if debugLifting {
+ fmt.Fprintf(os.Stderr, "\tplace %s = %s at block %s\n", phi.Name(), phi, v)
+ }
+ newPhis[v] = append(newPhis[v], newPhi{phi, alloc})
+
+ if work.add(v) {
+ W.add(v)
+ }
+ }
+ }
+ }
+
+ return true
+}
+
+// replaceAll replaces all intraprocedural uses of x with y,
+// updating x.Referrers and y.Referrers.
+// Precondition: x.Referrers() != nil, i.e. x must be local to some function.
+//
+func replaceAll(x, y Value) {
+ var rands []*Value
+ pxrefs := x.Referrers()
+ pyrefs := y.Referrers()
+ for _, instr := range *pxrefs {
+ rands = instr.Operands(rands[:0]) // recycle storage
+ for _, rand := range rands {
+ if *rand != nil {
+ if *rand == x {
+ *rand = y
+ }
+ }
+ }
+ if pyrefs != nil {
+ *pyrefs = append(*pyrefs, instr) // dups ok
+ }
+ }
+ *pxrefs = nil // x is now unreferenced
+}
+
+// renamed returns the value to which alloc is being renamed,
+// constructing it lazily if it's the implicit zero initialization.
+//
+func renamed(renaming []Value, alloc *Alloc) Value {
+ v := renaming[alloc.index]
+ if v == nil {
+ v = zeroConst(deref(alloc.Type()))
+ renaming[alloc.index] = v
+ }
+ return v
+}
+
+// rename implements the (Cytron et al) SSA renaming algorithm, a
+// preorder traversal of the dominator tree replacing all loads of
+// Alloc cells with the value stored to that cell by the dominating
+// store instruction. For lifting, we need only consider loads,
+// stores and φ-nodes.
+//
+// renaming is a map from *Alloc (keyed by index number) to its
+// dominating stored value; newPhis[x] is the set of new φ-nodes to be
+// prepended to block x.
+//
+func rename(u *BasicBlock, renaming []Value, newPhis newPhiMap) {
+ // Each φ-node becomes the new name for its associated Alloc.
+ for _, np := range newPhis[u] {
+ phi := np.phi
+ alloc := np.alloc
+ renaming[alloc.index] = phi
+ }
+
+ // Rename loads and stores of allocs.
+ for i, instr := range u.Instrs {
+ switch instr := instr.(type) {
+ case *Alloc:
+ if instr.index >= 0 { // store of zero to Alloc cell
+ // Replace dominated loads by the zero value.
+ renaming[instr.index] = nil
+ if debugLifting {
+ fmt.Fprintf(os.Stderr, "\tkill alloc %s\n", instr)
+ }
+ // Delete the Alloc.
+ u.Instrs[i] = nil
+ u.gaps++
+ }
+
+ case *Store:
+ if alloc, ok := instr.Addr.(*Alloc); ok && alloc.index >= 0 { // store to Alloc cell
+ // Replace dominated loads by the stored value.
+ renaming[alloc.index] = instr.Val
+ if debugLifting {
+ fmt.Fprintf(os.Stderr, "\tkill store %s; new value: %s\n",
+ instr, instr.Val.Name())
+ }
+ // Remove the store from the referrer list of the stored value.
+ if refs := instr.Val.Referrers(); refs != nil {
+ *refs = removeInstr(*refs, instr)
+ }
+ // Delete the Store.
+ u.Instrs[i] = nil
+ u.gaps++
+ }
+
+ case *UnOp:
+ if instr.Op == token.MUL {
+ if alloc, ok := instr.X.(*Alloc); ok && alloc.index >= 0 { // load of Alloc cell
+ newval := renamed(renaming, alloc)
+ if debugLifting {
+ fmt.Fprintf(os.Stderr, "\tupdate load %s = %s with %s\n",
+ instr.Name(), instr, newval.Name())
+ }
+ // Replace all references to
+ // the loaded value by the
+ // dominating stored value.
+ replaceAll(instr, newval)
+ // Delete the Load.
+ u.Instrs[i] = nil
+ u.gaps++
+ }
+ }
+
+ case *DebugRef:
+ if alloc, ok := instr.X.(*Alloc); ok && alloc.index >= 0 { // ref of Alloc cell
+ if instr.IsAddr {
+ instr.X = renamed(renaming, alloc)
+ instr.IsAddr = false
+
+ // Add DebugRef to instr.X's referrers.
+ if refs := instr.X.Referrers(); refs != nil {
+ *refs = append(*refs, instr)
+ }
+ } else {
+ // A source expression denotes the address
+ // of an Alloc that was optimized away.
+ instr.X = nil
+
+ // Delete the DebugRef.
+ u.Instrs[i] = nil
+ u.gaps++
+ }
+ }
+ }
+ }
+
+ // For each φ-node in a CFG successor, rename the edge.
+ for _, v := range u.Succs {
+ phis := newPhis[v]
+ if len(phis) == 0 {
+ continue
+ }
+ i := v.predIndex(u)
+ for _, np := range phis {
+ phi := np.phi
+ alloc := np.alloc
+ newval := renamed(renaming, alloc)
+ if debugLifting {
+ fmt.Fprintf(os.Stderr, "\tsetphi %s edge %s -> %s (#%d) (alloc=%s) := %s\n",
+ phi.Name(), u, v, i, alloc.Name(), newval.Name())
+ }
+ phi.Edges[i] = newval
+ if prefs := newval.Referrers(); prefs != nil {
+ *prefs = append(*prefs, phi)
+ }
+ }
+ }
+
+ // Continue depth-first recursion over domtree, pushing a
+ // fresh copy of the renaming map for each subtree.
+ for _, v := range u.dom.children {
+ // TODO(adonovan): opt: avoid copy on final iteration; use destructive update.
+ r := make([]Value, len(renaming))
+ copy(r, renaming)
+ rename(v, r, newPhis)
+ }
+}
diff --git a/llgo/third_party/go.tools/go/ssa/lvalue.go b/llgo/third_party/go.tools/go/ssa/lvalue.go
new file mode 100644
index 0000000000000000000000000000000000000000..9ad82711679070912413a047e172419d689d79f5
--- /dev/null
+++ b/llgo/third_party/go.tools/go/ssa/lvalue.go
@@ -0,0 +1,122 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// lvalues are the union of addressable expressions and map-index
+// expressions.
+
+import (
+ "go/ast"
+ "go/token"
+
+ "llvm.org/llgo/third_party/go.tools/go/types"
+)
+
+// An lvalue represents an assignable location that may appear on the
+// left-hand side of an assignment. This is a generalization of a
+// pointer to permit updates to elements of maps.
+//
+type lvalue interface {
+ store(fn *Function, v Value) // stores v into the location
+ load(fn *Function) Value // loads the contents of the location
+ address(fn *Function) Value // address of the location
+ typ() types.Type // returns the type of the location
+}
+
+// An address is an lvalue represented by a true pointer.
+type address struct {
+ addr Value
+ starPos token.Pos // source position, if from explicit *addr
+ expr ast.Expr // source syntax [debug mode]
+}
+
+func (a *address) load(fn *Function) Value {
+ load := emitLoad(fn, a.addr)
+ load.pos = a.starPos
+ return load
+}
+
+func (a *address) store(fn *Function, v Value) {
+ store := emitStore(fn, a.addr, v)
+ store.pos = a.starPos
+ if a.expr != nil {
+ // store.Val is v, converted for assignability.
+ emitDebugRef(fn, a.expr, store.Val, false)
+ }
+}
+
+func (a *address) address(fn *Function) Value {
+ if a.expr != nil {
+ emitDebugRef(fn, a.expr, a.addr, true)
+ }
+ return a.addr
+}
+
+func (a *address) typ() types.Type {
+ return deref(a.addr.Type())
+}
+
+// An element is an lvalue represented by m[k], the location of an
+// element of a map or string. These locations are not addressable
+// since pointers cannot be formed from them, but they do support
+// load(), and in the case of maps, store().
+//
+type element struct {
+ m, k Value // map or string
+ t types.Type // map element type or string byte type
+ pos token.Pos // source position of colon ({k:v}) or lbrack (m[k]=v)
+}
+
+func (e *element) load(fn *Function) Value {
+ l := &Lookup{
+ X: e.m,
+ Index: e.k,
+ }
+ l.setPos(e.pos)
+ l.setType(e.t)
+ return fn.emit(l)
+}
+
+func (e *element) store(fn *Function, v Value) {
+ up := &MapUpdate{
+ Map: e.m,
+ Key: e.k,
+ Value: emitConv(fn, v, e.t),
+ }
+ up.pos = e.pos
+ fn.emit(up)
+}
+
+func (e *element) address(fn *Function) Value {
+ panic("map/string elements are not addressable")
+}
+
+func (e *element) typ() types.Type {
+ return e.t
+}
+
+// A blank is a dummy variable whose name is "_".
+// It is not reified: loads are illegal and stores are ignored.
+//
+type blank struct{}
+
+func (bl blank) load(fn *Function) Value {
+ panic("blank.load is illegal")
+}
+
+func (bl blank) store(fn *Function, v Value) {
+ // no-op
+}
+
+func (bl blank) address(fn *Function) Value {
+ panic("blank var is not addressable")
+}
+
+func (bl blank) typ() types.Type {
+ // This should be the type of the blank Ident; the typechecker
+ // doesn't provide this yet, but fortunately, we don't need it
+ // yet either.
+ panic("blank.typ is unimplemented")
+}
diff --git a/llgo/third_party/go.tools/go/ssa/methods.go b/llgo/third_party/go.tools/go/ssa/methods.go
new file mode 100644
index 0000000000000000000000000000000000000000..2a92481eac5f5cb4ab4fdb0d0d7ddf8e01dab37e
--- /dev/null
+++ b/llgo/third_party/go.tools/go/ssa/methods.go
@@ -0,0 +1,197 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// This file defines utilities for population of method sets.
+
+import (
+ "fmt"
+
+ "llvm.org/llgo/third_party/go.tools/go/types"
+)
+
+// Method returns the Function implementing method sel, building
+// wrapper methods on demand. It returns nil if sel denotes an
+// abstract (interface) method.
+//
+// Precondition: sel.Kind() == MethodVal.
+//
+// TODO(adonovan): rename this to MethodValue because of the
+// precondition, and for consistency with functions in source.go.
+//
+// Thread-safe.
+//
+// EXCLUSIVE_LOCKS_ACQUIRED(prog.methodsMu)
+//
+func (prog *Program) Method(sel *types.Selection) *Function {
+ if sel.Kind() != types.MethodVal {
+ panic(fmt.Sprintf("Method(%s) kind != MethodVal", sel))
+ }
+ T := sel.Recv()
+ if isInterface(T) {
+ return nil // abstract method
+ }
+ if prog.mode&LogSource != 0 {
+ defer logStack("Method %s %v", T, sel)()
+ }
+
+ prog.methodsMu.Lock()
+ defer prog.methodsMu.Unlock()
+
+ return prog.addMethod(prog.createMethodSet(T), sel)
+}
+
+// LookupMethod returns the implementation of the method of type T
+// identified by (pkg, name). It returns nil if the method exists but
+// is abstract, and panics if T has no such method.
+//
+func (prog *Program) LookupMethod(T types.Type, pkg *types.Package, name string) *Function {
+ sel := prog.MethodSets.MethodSet(T).Lookup(pkg, name)
+ if sel == nil {
+ panic(fmt.Sprintf("%s has no method %s", T, types.Id(pkg, name)))
+ }
+ return prog.Method(sel)
+}
+
+// makeMethods ensures that all concrete methods of type T are
+// generated. It is equivalent to calling prog.Method() on all
+// members of T.methodSet(), but acquires fewer locks.
+//
+// It reports whether the type's (concrete) method set is non-empty.
+//
+// Thread-safe.
+//
+// EXCLUSIVE_LOCKS_ACQUIRED(prog.methodsMu)
+//
+func (prog *Program) makeMethods(T types.Type) bool {
+ if isInterface(T) {
+ return false // abstract method
+ }
+ tmset := prog.MethodSets.MethodSet(T)
+ n := tmset.Len()
+ if n == 0 {
+ return false // empty (common case)
+ }
+
+ if prog.mode&LogSource != 0 {
+ defer logStack("makeMethods %s", T)()
+ }
+
+ prog.methodsMu.Lock()
+ defer prog.methodsMu.Unlock()
+
+ mset := prog.createMethodSet(T)
+ if !mset.complete {
+ mset.complete = true
+ for i := 0; i < n; i++ {
+ prog.addMethod(mset, tmset.At(i))
+ }
+ }
+
+ return true
+}
+
+// methodSet contains the (concrete) methods of a non-interface type.
+type methodSet struct {
+ mapping map[string]*Function // populated lazily
+ complete bool // mapping contains all methods
+}
+
+// Precondition: !isInterface(T).
+// EXCLUSIVE_LOCKS_REQUIRED(prog.methodsMu)
+func (prog *Program) createMethodSet(T types.Type) *methodSet {
+ mset, ok := prog.methodSets.At(T).(*methodSet)
+ if !ok {
+ mset = &methodSet{mapping: make(map[string]*Function)}
+ prog.methodSets.Set(T, mset)
+ }
+ return mset
+}
+
+// EXCLUSIVE_LOCKS_REQUIRED(prog.methodsMu)
+func (prog *Program) addMethod(mset *methodSet, sel *types.Selection) *Function {
+ if sel.Kind() == types.MethodExpr {
+ panic(sel)
+ }
+ id := sel.Obj().Id()
+ fn := mset.mapping[id]
+ if fn == nil {
+ obj := sel.Obj().(*types.Func)
+
+ needsPromotion := len(sel.Index()) > 1
+ needsIndirection := !isPointer(recvType(obj)) && isPointer(sel.Recv())
+ if needsPromotion || needsIndirection {
+ fn = makeWrapper(prog, sel)
+ } else {
+ fn = prog.declaredFunc(obj)
+ }
+ if fn.Signature.Recv() == nil {
+ panic(fn) // missing receiver
+ }
+ mset.mapping[id] = fn
+ }
+ return fn
+}
+
+// TypesWithMethodSets returns a new unordered slice containing all
+// concrete types in the program for which a complete (non-empty)
+// method set is required at run-time.
+//
+// It is the union of pkg.TypesWithMethodSets() for all pkg in
+// prog.AllPackages().
+//
+// Thread-safe.
+//
+// EXCLUSIVE_LOCKS_ACQUIRED(prog.methodsMu)
+//
+func (prog *Program) TypesWithMethodSets() []types.Type {
+ prog.methodsMu.Lock()
+ defer prog.methodsMu.Unlock()
+
+ var res []types.Type
+ prog.methodSets.Iterate(func(T types.Type, v interface{}) {
+ if v.(*methodSet).complete {
+ res = append(res, T)
+ }
+ })
+ return res
+}
+
+// TypesWithMethodSets returns an unordered slice containing the set
+// of all concrete types referenced within package pkg and not
+// belonging to some other package, for which a complete (non-empty)
+// method set is required at run-time.
+//
+// A type belongs to a package if it is a named type or a pointer to a
+// named type, and the name was defined in that package. All other
+// types belong to no package.
+//
+// A type may appear in the TypesWithMethodSets() set of multiple
+// distinct packages if that type belongs to no package. Typical
+// compilers emit method sets for such types multiple times (using
+// weak symbols) into each package that references them, with the
+// linker performing duplicate elimination.
+//
+// This set includes the types of all operands of some MakeInterface
+// instruction, the types of all exported members of some package, and
+// all types that are subcomponents, since even types that aren't used
+// directly may be derived via reflection.
+//
+// Callers must not mutate the result.
+//
+func (pkg *Package) TypesWithMethodSets() []types.Type {
+ // pkg.methodsMu not required; concurrent (build) phase is over.
+ return pkg.methodSets
+}
+
+// declaredFunc returns the concrete function/method denoted by obj.
+// Panic ensues if there is none.
+//
+func (prog *Program) declaredFunc(obj *types.Func) *Function {
+ if v := prog.packageLevelValue(obj); v != nil {
+ return v.(*Function)
+ }
+ panic("no concrete method: " + obj.String())
+}
diff --git a/llgo/third_party/go.tools/go/ssa/print.go b/llgo/third_party/go.tools/go/ssa/print.go
new file mode 100644
index 0000000000000000000000000000000000000000..13a68877fc6a23071371a19672b65ee3acea9a18
--- /dev/null
+++ b/llgo/third_party/go.tools/go/ssa/print.go
@@ -0,0 +1,427 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// This file implements the String() methods for all Value and
+// Instruction types.
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "reflect"
+ "sort"
+
+ "llvm.org/llgo/third_party/go.tools/go/types"
+ "llvm.org/llgo/third_party/go.tools/go/types/typeutil"
+)
+
+// relName returns the name of v relative to i.
+// In most cases, this is identical to v.Name(), but references to
+// Functions (including methods) and Globals use RelString and
+// all types are displayed with relType, so that only cross-package
+// references are package-qualified.
+//
+func relName(v Value, i Instruction) string {
+ var from *types.Package
+ if i != nil {
+ from = i.Parent().pkgobj()
+ }
+ switch v := v.(type) {
+ case Member: // *Function or *Global
+ return v.RelString(from)
+ case *Const:
+ return v.RelString(from)
+ }
+ return v.Name()
+}
+
+func relType(t types.Type, from *types.Package) string {
+ return types.TypeString(from, t)
+}
+
+func relString(m Member, from *types.Package) string {
+ // NB: not all globals have an Object (e.g. init$guard),
+ // so use Package().Object not Object.Package().
+ if obj := m.Package().Object; obj != nil && obj != from {
+ return fmt.Sprintf("%s.%s", obj.Path(), m.Name())
+ }
+ return m.Name()
+}
+
+// Value.String()
+//
+// This method is provided only for debugging.
+// It never appears in disassembly, which uses Value.Name().
+
+func (v *Parameter) String() string {
+ from := v.Parent().pkgobj()
+ return fmt.Sprintf("parameter %s : %s", v.Name(), relType(v.Type(), from))
+}
+
+func (v *FreeVar) String() string {
+ from := v.Parent().pkgobj()
+ return fmt.Sprintf("freevar %s : %s", v.Name(), relType(v.Type(), from))
+}
+
+func (v *Builtin) String() string {
+ return fmt.Sprintf("builtin %s", v.Name())
+}
+
+// Instruction.String()
+
+func (v *Alloc) String() string {
+ op := "local"
+ if v.Heap {
+ op = "new"
+ }
+ from := v.Parent().pkgobj()
+ return fmt.Sprintf("%s %s (%s)", op, relType(deref(v.Type()), from), v.Comment)
+}
+
+func (v *Phi) String() string {
+ var b bytes.Buffer
+ b.WriteString("phi [")
+ for i, edge := range v.Edges {
+ if i > 0 {
+ b.WriteString(", ")
+ }
+ // Be robust against malformed CFG.
+ block := -1
+ if v.block != nil && i < len(v.block.Preds) {
+ block = v.block.Preds[i].Index
+ }
+ fmt.Fprintf(&b, "%d: ", block)
+ edgeVal := "" // be robust
+ if edge != nil {
+ edgeVal = relName(edge, v)
+ }
+ b.WriteString(edgeVal)
+ }
+ b.WriteString("]")
+ if v.Comment != "" {
+ b.WriteString(" #")
+ b.WriteString(v.Comment)
+ }
+ return b.String()
+}
+
+func printCall(v *CallCommon, prefix string, instr Instruction) string {
+ var b bytes.Buffer
+ b.WriteString(prefix)
+ if !v.IsInvoke() {
+ b.WriteString(relName(v.Value, instr))
+ } else {
+ fmt.Fprintf(&b, "invoke %s.%s", relName(v.Value, instr), v.Method.Name())
+ }
+ b.WriteString("(")
+ for i, arg := range v.Args {
+ if i > 0 {
+ b.WriteString(", ")
+ }
+ b.WriteString(relName(arg, instr))
+ }
+ if v.Signature().Variadic() {
+ b.WriteString("...")
+ }
+ b.WriteString(")")
+ return b.String()
+}
+
+func (c *CallCommon) String() string {
+ return printCall(c, "", nil)
+}
+
+func (v *Call) String() string {
+ return printCall(&v.Call, "", v)
+}
+
+func (v *BinOp) String() string {
+ return fmt.Sprintf("%s %s %s", relName(v.X, v), v.Op.String(), relName(v.Y, v))
+}
+
+func (v *UnOp) String() string {
+ return fmt.Sprintf("%s%s%s", v.Op, relName(v.X, v), commaOk(v.CommaOk))
+}
+
+func printConv(prefix string, v, x Value) string {
+ from := v.Parent().pkgobj()
+ return fmt.Sprintf("%s %s <- %s (%s)",
+ prefix,
+ relType(v.Type(), from),
+ relType(x.Type(), from),
+ relName(x, v.(Instruction)))
+}
+
+func (v *ChangeType) String() string { return printConv("changetype", v, v.X) }
+func (v *Convert) String() string { return printConv("convert", v, v.X) }
+func (v *ChangeInterface) String() string { return printConv("change interface", v, v.X) }
+func (v *MakeInterface) String() string { return printConv("make", v, v.X) }
+
+func (v *MakeClosure) String() string {
+ var b bytes.Buffer
+ fmt.Fprintf(&b, "make closure %s", relName(v.Fn, v))
+ if v.Bindings != nil {
+ b.WriteString(" [")
+ for i, c := range v.Bindings {
+ if i > 0 {
+ b.WriteString(", ")
+ }
+ b.WriteString(relName(c, v))
+ }
+ b.WriteString("]")
+ }
+ return b.String()
+}
+
+func (v *MakeSlice) String() string {
+ from := v.Parent().pkgobj()
+ return fmt.Sprintf("make %s %s %s",
+ relType(v.Type(), from),
+ relName(v.Len, v),
+ relName(v.Cap, v))
+}
+
+func (v *Slice) String() string {
+ var b bytes.Buffer
+ b.WriteString("slice ")
+ b.WriteString(relName(v.X, v))
+ b.WriteString("[")
+ if v.Low != nil {
+ b.WriteString(relName(v.Low, v))
+ }
+ b.WriteString(":")
+ if v.High != nil {
+ b.WriteString(relName(v.High, v))
+ }
+ if v.Max != nil {
+ b.WriteString(":")
+ b.WriteString(relName(v.Max, v))
+ }
+ b.WriteString("]")
+ return b.String()
+}
+
+func (v *MakeMap) String() string {
+ res := ""
+ if v.Reserve != nil {
+ res = relName(v.Reserve, v)
+ }
+ from := v.Parent().pkgobj()
+ return fmt.Sprintf("make %s %s", relType(v.Type(), from), res)
+}
+
+func (v *MakeChan) String() string {
+ from := v.Parent().pkgobj()
+ return fmt.Sprintf("make %s %s", relType(v.Type(), from), relName(v.Size, v))
+}
+
+func (v *FieldAddr) String() string {
+ st := deref(v.X.Type()).Underlying().(*types.Struct)
+ // Be robust against a bad index.
+ name := "?"
+ if 0 <= v.Field && v.Field < st.NumFields() {
+ name = st.Field(v.Field).Name()
+ }
+ return fmt.Sprintf("&%s.%s [#%d]", relName(v.X, v), name, v.Field)
+}
+
+func (v *Field) String() string {
+ st := v.X.Type().Underlying().(*types.Struct)
+ // Be robust against a bad index.
+ name := "?"
+ if 0 <= v.Field && v.Field < st.NumFields() {
+ name = st.Field(v.Field).Name()
+ }
+ return fmt.Sprintf("%s.%s [#%d]", relName(v.X, v), name, v.Field)
+}
+
+func (v *IndexAddr) String() string {
+ return fmt.Sprintf("&%s[%s]", relName(v.X, v), relName(v.Index, v))
+}
+
+func (v *Index) String() string {
+ return fmt.Sprintf("%s[%s]", relName(v.X, v), relName(v.Index, v))
+}
+
+func (v *Lookup) String() string {
+ return fmt.Sprintf("%s[%s]%s", relName(v.X, v), relName(v.Index, v), commaOk(v.CommaOk))
+}
+
+func (v *Range) String() string {
+ return "range " + relName(v.X, v)
+}
+
+func (v *Next) String() string {
+ return "next " + relName(v.Iter, v)
+}
+
+func (v *TypeAssert) String() string {
+ from := v.Parent().pkgobj()
+ return fmt.Sprintf("typeassert%s %s.(%s)", commaOk(v.CommaOk), relName(v.X, v), relType(v.AssertedType, from))
+}
+
+func (v *Extract) String() string {
+ return fmt.Sprintf("extract %s #%d", relName(v.Tuple, v), v.Index)
+}
+
+func (s *Jump) String() string {
+ // Be robust against malformed CFG.
+ block := -1
+ if s.block != nil && len(s.block.Succs) == 1 {
+ block = s.block.Succs[0].Index
+ }
+ return fmt.Sprintf("jump %d", block)
+}
+
+func (s *If) String() string {
+ // Be robust against malformed CFG.
+ tblock, fblock := -1, -1
+ if s.block != nil && len(s.block.Succs) == 2 {
+ tblock = s.block.Succs[0].Index
+ fblock = s.block.Succs[1].Index
+ }
+ return fmt.Sprintf("if %s goto %d else %d", relName(s.Cond, s), tblock, fblock)
+}
+
+func (s *Go) String() string {
+ return printCall(&s.Call, "go ", s)
+}
+
+func (s *Panic) String() string {
+ return "panic " + relName(s.X, s)
+}
+
+func (s *Return) String() string {
+ var b bytes.Buffer
+ b.WriteString("return")
+ for i, r := range s.Results {
+ if i == 0 {
+ b.WriteString(" ")
+ } else {
+ b.WriteString(", ")
+ }
+ b.WriteString(relName(r, s))
+ }
+ return b.String()
+}
+
+func (*RunDefers) String() string {
+ return "rundefers"
+}
+
+func (s *Send) String() string {
+ return fmt.Sprintf("send %s <- %s", relName(s.Chan, s), relName(s.X, s))
+}
+
+func (s *Defer) String() string {
+ return printCall(&s.Call, "defer ", s)
+}
+
+func (s *Select) String() string {
+ var b bytes.Buffer
+ for i, st := range s.States {
+ if i > 0 {
+ b.WriteString(", ")
+ }
+ if st.Dir == types.RecvOnly {
+ b.WriteString("<-")
+ b.WriteString(relName(st.Chan, s))
+ } else {
+ b.WriteString(relName(st.Chan, s))
+ b.WriteString("<-")
+ b.WriteString(relName(st.Send, s))
+ }
+ }
+ non := ""
+ if !s.Blocking {
+ non = "non"
+ }
+ return fmt.Sprintf("select %sblocking [%s]", non, b.String())
+}
+
+func (s *Store) String() string {
+ return fmt.Sprintf("*%s = %s", relName(s.Addr, s), relName(s.Val, s))
+}
+
+func (s *MapUpdate) String() string {
+ return fmt.Sprintf("%s[%s] = %s", relName(s.Map, s), relName(s.Key, s), relName(s.Value, s))
+}
+
+func (s *DebugRef) String() string {
+ p := s.Parent().Prog.Fset.Position(s.Pos())
+ var descr interface{}
+ if s.object != nil {
+ descr = s.object // e.g. "var x int"
+ } else {
+ descr = reflect.TypeOf(s.Expr) // e.g. "*ast.CallExpr"
+ }
+ var addr string
+ if s.IsAddr {
+ addr = "address of "
+ }
+ return fmt.Sprintf("; %s%s @ %d:%d is %s", addr, descr, p.Line, p.Column, s.X.Name())
+}
+
+func (p *Package) String() string {
+ return "package " + p.Object.Path()
+}
+
+var _ io.WriterTo = (*Package)(nil) // *Package implements io.Writer
+
+func (p *Package) WriteTo(w io.Writer) (int64, error) {
+ var buf bytes.Buffer
+ WritePackage(&buf, p)
+ n, err := w.Write(buf.Bytes())
+ return int64(n), err
+}
+
+// WritePackage writes to buf a human-readable summary of p.
+func WritePackage(buf *bytes.Buffer, p *Package) {
+ fmt.Fprintf(buf, "%s:\n", p)
+
+ var names []string
+ maxname := 0
+ for name := range p.Members {
+ if l := len(name); l > maxname {
+ maxname = l
+ }
+ names = append(names, name)
+ }
+
+ from := p.Object
+ sort.Strings(names)
+ for _, name := range names {
+ switch mem := p.Members[name].(type) {
+ case *NamedConst:
+ fmt.Fprintf(buf, " const %-*s %s = %s\n",
+ maxname, name, mem.Name(), mem.Value.RelString(from))
+
+ case *Function:
+ fmt.Fprintf(buf, " func %-*s %s\n",
+ maxname, name, relType(mem.Type(), from))
+
+ case *Type:
+ fmt.Fprintf(buf, " type %-*s %s\n",
+ maxname, name, relType(mem.Type().Underlying(), from))
+ for _, meth := range typeutil.IntuitiveMethodSet(mem.Type(), &p.Prog.MethodSets) {
+ fmt.Fprintf(buf, " %s\n", types.SelectionString(from, meth))
+ }
+
+ case *Global:
+ fmt.Fprintf(buf, " var %-*s %s\n",
+ maxname, name, relType(mem.Type().(*types.Pointer).Elem(), from))
+ }
+ }
+
+ fmt.Fprintf(buf, "\n")
+}
+
+func commaOk(x bool) string {
+ if x {
+ return ",ok"
+ }
+ return ""
+}
diff --git a/llgo/third_party/go.tools/go/ssa/sanity.go b/llgo/third_party/go.tools/go/ssa/sanity.go
new file mode 100644
index 0000000000000000000000000000000000000000..c6d2ba173bc63790073fd8b2b352d14b0e3e60cc
--- /dev/null
+++ b/llgo/third_party/go.tools/go/ssa/sanity.go
@@ -0,0 +1,515 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// An optional pass for sanity-checking invariants of the SSA representation.
+// Currently it checks CFG invariants but little at the instruction level.
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "strings"
+
+ "llvm.org/llgo/third_party/go.tools/go/types"
+)
+
+type sanity struct {
+ reporter io.Writer
+ fn *Function
+ block *BasicBlock
+ instrs map[Instruction]struct{}
+ insane bool
+}
+
+// sanityCheck performs integrity checking of the SSA representation
+// of the function fn and returns true if it was valid. Diagnostics
+// are written to reporter if non-nil, os.Stderr otherwise. Some
+// diagnostics are only warnings and do not imply a negative result.
+//
+// Sanity-checking is intended to facilitate the debugging of code
+// transformation passes.
+//
+func sanityCheck(fn *Function, reporter io.Writer) bool {
+ if reporter == nil {
+ reporter = os.Stderr
+ }
+ return (&sanity{reporter: reporter}).checkFunction(fn)
+}
+
+// mustSanityCheck is like sanityCheck but panics instead of returning
+// a negative result.
+//
+func mustSanityCheck(fn *Function, reporter io.Writer) {
+ if !sanityCheck(fn, reporter) {
+ fn.WriteTo(os.Stderr)
+ panic("SanityCheck failed")
+ }
+}
+
+func (s *sanity) diagnostic(prefix, format string, args ...interface{}) {
+ fmt.Fprintf(s.reporter, "%s: function %s", prefix, s.fn)
+ if s.block != nil {
+ fmt.Fprintf(s.reporter, ", block %s", s.block)
+ }
+ io.WriteString(s.reporter, ": ")
+ fmt.Fprintf(s.reporter, format, args...)
+ io.WriteString(s.reporter, "\n")
+}
+
+func (s *sanity) errorf(format string, args ...interface{}) {
+ s.insane = true
+ s.diagnostic("Error", format, args...)
+}
+
+func (s *sanity) warnf(format string, args ...interface{}) {
+ s.diagnostic("Warning", format, args...)
+}
+
+// findDuplicate returns an arbitrary basic block that appeared more
+// than once in blocks, or nil if all were unique.
+func findDuplicate(blocks []*BasicBlock) *BasicBlock {
+ if len(blocks) < 2 {
+ return nil
+ }
+ if blocks[0] == blocks[1] {
+ return blocks[0]
+ }
+ // Slow path:
+ m := make(map[*BasicBlock]bool)
+ for _, b := range blocks {
+ if m[b] {
+ return b
+ }
+ m[b] = true
+ }
+ return nil
+}
+
+func (s *sanity) checkInstr(idx int, instr Instruction) {
+ switch instr := instr.(type) {
+ case *If, *Jump, *Return, *Panic:
+ s.errorf("control flow instruction not at end of block")
+ case *Phi:
+ if idx == 0 {
+ // It suffices to apply this check to just the first phi node.
+ if dup := findDuplicate(s.block.Preds); dup != nil {
+ s.errorf("phi node in block with duplicate predecessor %s", dup)
+ }
+ } else {
+ prev := s.block.Instrs[idx-1]
+ if _, ok := prev.(*Phi); !ok {
+ s.errorf("Phi instruction follows a non-Phi: %T", prev)
+ }
+ }
+ if ne, np := len(instr.Edges), len(s.block.Preds); ne != np {
+ s.errorf("phi node has %d edges but %d predecessors", ne, np)
+
+ } else {
+ for i, e := range instr.Edges {
+ if e == nil {
+ s.errorf("phi node '%s' has no value for edge #%d from %s", instr.Comment, i, s.block.Preds[i])
+ }
+ }
+ }
+
+ case *Alloc:
+ if !instr.Heap {
+ found := false
+ for _, l := range s.fn.Locals {
+ if l == instr {
+ found = true
+ break
+ }
+ }
+ if !found {
+ s.errorf("local alloc %s = %s does not appear in Function.Locals", instr.Name(), instr)
+ }
+ }
+
+ case *BinOp:
+ case *Call:
+ case *ChangeInterface:
+ case *ChangeType:
+ case *Convert:
+ if _, ok := instr.X.Type().Underlying().(*types.Basic); !ok {
+ if _, ok := instr.Type().Underlying().(*types.Basic); !ok {
+ s.errorf("convert %s -> %s: at least one type must be basic", instr.X.Type(), instr.Type())
+ }
+ }
+
+ case *Defer:
+ case *Extract:
+ case *Field:
+ case *FieldAddr:
+ case *Go:
+ case *Index:
+ case *IndexAddr:
+ case *Lookup:
+ case *MakeChan:
+ case *MakeClosure:
+ numFree := len(instr.Fn.(*Function).FreeVars)
+ numBind := len(instr.Bindings)
+ if numFree != numBind {
+ s.errorf("MakeClosure has %d Bindings for function %s with %d free vars",
+ numBind, instr.Fn, numFree)
+
+ }
+ if recv := instr.Type().(*types.Signature).Recv(); recv != nil {
+ s.errorf("MakeClosure's type includes receiver %s", recv.Type())
+ }
+
+ case *MakeInterface:
+ case *MakeMap:
+ case *MakeSlice:
+ case *MapUpdate:
+ case *Next:
+ case *Range:
+ case *RunDefers:
+ case *Select:
+ case *Send:
+ case *Slice:
+ case *Store:
+ case *TypeAssert:
+ case *UnOp:
+ case *DebugRef:
+ // TODO(adonovan): implement checks.
+ default:
+ panic(fmt.Sprintf("Unknown instruction type: %T", instr))
+ }
+
+ if call, ok := instr.(CallInstruction); ok {
+ if call.Common().Signature() == nil {
+ s.errorf("nil signature: %s", call)
+ }
+ }
+
+ // Check that value-defining instructions have valid types
+ // and a valid referrer list.
+ if v, ok := instr.(Value); ok {
+ t := v.Type()
+ if t == nil {
+ s.errorf("no type: %s = %s", v.Name(), v)
+ } else if t == tRangeIter {
+ // not a proper type; ignore.
+ } else if b, ok := t.Underlying().(*types.Basic); ok && b.Info()&types.IsUntyped != 0 {
+ s.errorf("instruction has 'untyped' result: %s = %s : %s", v.Name(), v, t)
+ }
+ s.checkReferrerList(v)
+ }
+
+ // Untyped constants are legal as instruction Operands(),
+ // for example:
+ // _ = "foo"[0]
+ // or:
+ // if wordsize==64 {...}
+
+ // All other non-Instruction Values can be found via their
+ // enclosing Function or Package.
+}
+
+func (s *sanity) checkFinalInstr(idx int, instr Instruction) {
+ switch instr := instr.(type) {
+ case *If:
+ if nsuccs := len(s.block.Succs); nsuccs != 2 {
+ s.errorf("If-terminated block has %d successors; expected 2", nsuccs)
+ return
+ }
+ if s.block.Succs[0] == s.block.Succs[1] {
+ s.errorf("If-instruction has same True, False target blocks: %s", s.block.Succs[0])
+ return
+ }
+
+ case *Jump:
+ if nsuccs := len(s.block.Succs); nsuccs != 1 {
+ s.errorf("Jump-terminated block has %d successors; expected 1", nsuccs)
+ return
+ }
+
+ case *Return:
+ if nsuccs := len(s.block.Succs); nsuccs != 0 {
+ s.errorf("Return-terminated block has %d successors; expected none", nsuccs)
+ return
+ }
+ if na, nf := len(instr.Results), s.fn.Signature.Results().Len(); nf != na {
+ s.errorf("%d-ary return in %d-ary function", na, nf)
+ }
+
+ case *Panic:
+ if nsuccs := len(s.block.Succs); nsuccs != 0 {
+ s.errorf("Panic-terminated block has %d successors; expected none", nsuccs)
+ return
+ }
+
+ default:
+ s.errorf("non-control flow instruction at end of block")
+ }
+}
+
+func (s *sanity) checkBlock(b *BasicBlock, index int) {
+ s.block = b
+
+ if b.Index != index {
+ s.errorf("block has incorrect Index %d", b.Index)
+ }
+ if b.parent != s.fn {
+ s.errorf("block has incorrect parent %s", b.parent)
+ }
+
+ // Check all blocks are reachable.
+ // (The entry block is always implicitly reachable,
+ // as is the Recover block, if any.)
+ if (index > 0 && b != b.parent.Recover) && len(b.Preds) == 0 {
+ s.warnf("unreachable block")
+ if b.Instrs == nil {
+ // Since this block is about to be pruned,
+ // tolerating transient problems in it
+ // simplifies other optimizations.
+ return
+ }
+ }
+
+ // Check predecessor and successor relations are dual,
+ // and that all blocks in CFG belong to same function.
+ for _, a := range b.Preds {
+ found := false
+ for _, bb := range a.Succs {
+ if bb == b {
+ found = true
+ break
+ }
+ }
+ if !found {
+ s.errorf("expected successor edge in predecessor %s; found only: %s", a, a.Succs)
+ }
+ if a.parent != s.fn {
+ s.errorf("predecessor %s belongs to different function %s", a, a.parent)
+ }
+ }
+ for _, c := range b.Succs {
+ found := false
+ for _, bb := range c.Preds {
+ if bb == b {
+ found = true
+ break
+ }
+ }
+ if !found {
+ s.errorf("expected predecessor edge in successor %s; found only: %s", c, c.Preds)
+ }
+ if c.parent != s.fn {
+ s.errorf("successor %s belongs to different function %s", c, c.parent)
+ }
+ }
+
+ // Check each instruction is sane.
+ n := len(b.Instrs)
+ if n == 0 {
+ s.errorf("basic block contains no instructions")
+ }
+ var rands [10]*Value // reuse storage
+ for j, instr := range b.Instrs {
+ if instr == nil {
+ s.errorf("nil instruction at index %d", j)
+ continue
+ }
+ if b2 := instr.Block(); b2 == nil {
+ s.errorf("nil Block() for instruction at index %d", j)
+ continue
+ } else if b2 != b {
+ s.errorf("wrong Block() (%s) for instruction at index %d ", b2, j)
+ continue
+ }
+ if j < n-1 {
+ s.checkInstr(j, instr)
+ } else {
+ s.checkFinalInstr(j, instr)
+ }
+
+ // Check Instruction.Operands.
+ operands:
+ for i, op := range instr.Operands(rands[:0]) {
+ if op == nil {
+ s.errorf("nil operand pointer %d of %s", i, instr)
+ continue
+ }
+ val := *op
+ if val == nil {
+ continue // a nil operand is ok
+ }
+
+ // Check that "untyped" types only appear on constant operands.
+ if _, ok := (*op).(*Const); !ok {
+ if basic, ok := (*op).Type().(*types.Basic); ok {
+ if basic.Info()&types.IsUntyped != 0 {
+ s.errorf("operand #%d of %s is untyped: %s", i, instr, basic)
+ }
+ }
+ }
+
+ // Check that Operands that are also Instructions belong to same function.
+ // TODO(adonovan): also check their block dominates block b.
+ if val, ok := val.(Instruction); ok {
+ if val.Parent() != s.fn {
+ s.errorf("operand %d of %s is an instruction (%s) from function %s", i, instr, val, val.Parent())
+ }
+ }
+
+ // Check that each function-local operand of
+ // instr refers back to instr. (NB: quadratic)
+ switch val := val.(type) {
+ case *Const, *Global, *Builtin:
+ continue // not local
+ case *Function:
+ if val.parent == nil {
+ continue // only anon functions are local
+ }
+ }
+
+ // TODO(adonovan): check val.Parent() != nil <=> val.Referrers() is defined.
+
+ if refs := val.Referrers(); refs != nil {
+ for _, ref := range *refs {
+ if ref == instr {
+ continue operands
+ }
+ }
+ s.errorf("operand %d of %s (%s) does not refer to us", i, instr, val)
+ } else {
+ s.errorf("operand %d of %s (%s) has no referrers", i, instr, val)
+ }
+ }
+ }
+}
+
+func (s *sanity) checkReferrerList(v Value) {
+ refs := v.Referrers()
+ if refs == nil {
+ s.errorf("%s has missing referrer list", v.Name())
+ return
+ }
+ for i, ref := range *refs {
+ if _, ok := s.instrs[ref]; !ok {
+ s.errorf("%s.Referrers()[%d] = %s is not an instruction belonging to this function", v.Name(), i, ref)
+ }
+ }
+}
+
+func (s *sanity) checkFunction(fn *Function) bool {
+ // TODO(adonovan): check Function invariants:
+ // - check params match signature
+ // - check transient fields are nil
+ // - warn if any fn.Locals do not appear among block instructions.
+ s.fn = fn
+ if fn.Prog == nil {
+ s.errorf("nil Prog")
+ }
+
+ fn.String() // must not crash
+ fn.RelString(fn.pkgobj()) // must not crash
+
+ // All functions have a package, except delegates (which are
+ // shared across packages, or duplicated as weak symbols in a
+ // separate-compilation model), and error.Error.
+ if fn.Pkg == nil {
+ if strings.HasPrefix(fn.Synthetic, "wrapper ") ||
+ strings.HasPrefix(fn.Synthetic, "bound ") ||
+ strings.HasPrefix(fn.Synthetic, "thunk ") ||
+ strings.HasSuffix(fn.name, "Error") {
+ // ok
+ } else {
+ s.errorf("nil Pkg")
+ }
+ }
+ if src, syn := fn.Synthetic == "", fn.Syntax() != nil; src != syn {
+ s.errorf("got fromSource=%t, hasSyntax=%t; want same values", src, syn)
+ }
+ for i, l := range fn.Locals {
+ if l.Parent() != fn {
+ s.errorf("Local %s at index %d has wrong parent", l.Name(), i)
+ }
+ if l.Heap {
+ s.errorf("Local %s at index %d has Heap flag set", l.Name(), i)
+ }
+ }
+ // Build the set of valid referrers.
+ s.instrs = make(map[Instruction]struct{})
+ for _, b := range fn.Blocks {
+ for _, instr := range b.Instrs {
+ s.instrs[instr] = struct{}{}
+ }
+ }
+ for i, p := range fn.Params {
+ if p.Parent() != fn {
+ s.errorf("Param %s at index %d has wrong parent", p.Name(), i)
+ }
+ s.checkReferrerList(p)
+ }
+ for i, fv := range fn.FreeVars {
+ if fv.Parent() != fn {
+ s.errorf("FreeVar %s at index %d has wrong parent", fv.Name(), i)
+ }
+ s.checkReferrerList(fv)
+ }
+
+ if fn.Blocks != nil && len(fn.Blocks) == 0 {
+ // Function _had_ blocks (so it's not external) but
+ // they were "optimized" away, even the entry block.
+ s.errorf("Blocks slice is non-nil but empty")
+ }
+ for i, b := range fn.Blocks {
+ if b == nil {
+ s.warnf("nil *BasicBlock at f.Blocks[%d]", i)
+ continue
+ }
+ s.checkBlock(b, i)
+ }
+ if fn.Recover != nil && fn.Blocks[fn.Recover.Index] != fn.Recover {
+ s.errorf("Recover block is not in Blocks slice")
+ }
+
+ s.block = nil
+ for i, anon := range fn.AnonFuncs {
+ if anon.Parent() != fn {
+ s.errorf("AnonFuncs[%d]=%s but %s.Parent()=%s", i, anon, anon, anon.Parent())
+ }
+ }
+ s.fn = nil
+ return !s.insane
+}
+
+// sanityCheckPackage checks invariants of packages upon creation.
+// It does not require that the package is built.
+// Unlike sanityCheck (for functions), it just panics at the first error.
+func sanityCheckPackage(pkg *Package) {
+ if pkg.Object == nil {
+ panic(fmt.Sprintf("Package %s has no Object", pkg))
+ }
+ pkg.String() // must not crash
+
+ for name, mem := range pkg.Members {
+ if name != mem.Name() {
+ panic(fmt.Sprintf("%s: %T.Name() = %s, want %s",
+ pkg.Object.Path(), mem, mem.Name(), name))
+ }
+ obj := mem.Object()
+ if obj == nil {
+ // This check is sound because fields
+ // {Global,Function}.object have type
+ // types.Object. (If they were declared as
+ // *types.{Var,Func}, we'd have a non-empty
+ // interface containing a nil pointer.)
+
+ continue // not all members have typechecker objects
+ }
+ if obj.Name() != name {
+ panic(fmt.Sprintf("%s: %T.Object().Name() = %s, want %s",
+ pkg.Object.Path(), mem, obj.Name(), name))
+ }
+ if obj.Pos() != mem.Pos() {
+ panic(fmt.Sprintf("%s Pos=%d obj.Pos=%d", mem, mem.Pos(), obj.Pos()))
+ }
+ }
+}
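
A hedged sketch, not part of the patch: sanityCheck and mustSanityCheck are unexported, so only code inside package ssa can call them; a transformation pass might verify its result roughly like this (verify and fn are hypothetical).

// verify is a hypothetical helper inside package ssa: run sanityCheck
// and surface its diagnostics instead of writing them to os.Stderr.
func verify(fn *Function) {
	var report bytes.Buffer
	if !sanityCheck(fn, &report) {
		panic("sanity check failed for " + fn.String() + ":\n" + report.String())
	}
}
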
diff --git a/llgo/third_party/go.tools/go/ssa/source.go b/llgo/third_party/go.tools/go/ssa/source.go
new file mode 100644
index 0000000000000000000000000000000000000000..764973548a3a316ab96aded335e8d7b1d42df8e7
--- /dev/null
+++ b/llgo/third_party/go.tools/go/ssa/source.go
@@ -0,0 +1,294 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// This file defines utilities for working with source positions
+// or source-level named entities ("objects").
+
+// TODO(adonovan): test that {Value,Instruction}.Pos() positions match
+// the originating syntax, as specified.
+
+import (
+ "go/ast"
+ "go/token"
+
+ "llvm.org/llgo/third_party/go.tools/go/types"
+)
+
+// EnclosingFunction returns the function that contains the syntax
+// node denoted by path.
+//
+// Syntax associated with package-level variable specifications is
+// enclosed by the package's init() function.
+//
+// Returns nil if not found; reasons might include:
+// - the node is not enclosed by any function.
+// - the node is within an anonymous function (FuncLit) and
+// its SSA function has not been created yet
+// (pkg.Build() has not yet been called).
+//
+func EnclosingFunction(pkg *Package, path []ast.Node) *Function {
+ // Start with package-level function...
+ fn := findEnclosingPackageLevelFunction(pkg, path)
+ if fn == nil {
+ return nil // not in any function
+ }
+
+ // ...then walk down the nested anonymous functions.
+ n := len(path)
+outer:
+ for i := range path {
+ if lit, ok := path[n-1-i].(*ast.FuncLit); ok {
+ for _, anon := range fn.AnonFuncs {
+ if anon.Pos() == lit.Type.Func {
+ fn = anon
+ continue outer
+ }
+ }
+ // SSA function not found:
+ // - package not yet built, or maybe
+ // - builder skipped FuncLit in dead block
+ // (in principle; but currently the Builder
+ // generates even dead FuncLits).
+ return nil
+ }
+ }
+ return fn
+}
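
A hedged sketch, not part of the patch: the intended pairing of astutil.PathEnclosingInterval with EnclosingFunction (findEnclosing is a hypothetical helper, assuming the obvious go/ast, go/token, astutil and ssa imports; the tests later in this patch exercise the same flow).

// findEnclosing reports the SSA function enclosing a source position,
// if any. f is a parsed file and pkg a *ssa.Package that has been built.
func findEnclosing(f *ast.File, pos token.Pos, pkg *ssa.Package) *ssa.Function {
	path, exact := astutil.PathEnclosingInterval(f, pos, pos)
	if path == nil || !exact {
		return nil // no syntax at that position
	}
	return ssa.EnclosingFunction(pkg, path) // may be nil, e.g. unbuilt FuncLit
}
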
+
+// HasEnclosingFunction returns true if the AST node denoted by path
+// is contained within the declaration of some function or
+// package-level variable.
+//
+// Unlike EnclosingFunction, the behaviour of this function does not
+// depend on whether SSA code for pkg has been built, so it can be
+// used to quickly reject check inputs that will cause
+// EnclosingFunction to fail, prior to SSA building.
+//
+func HasEnclosingFunction(pkg *Package, path []ast.Node) bool {
+ return findEnclosingPackageLevelFunction(pkg, path) != nil
+}
+
+// findEnclosingPackageLevelFunction returns the Function
+// corresponding to the package-level function enclosing path.
+//
+func findEnclosingPackageLevelFunction(pkg *Package, path []ast.Node) *Function {
+ if n := len(path); n >= 2 { // [... {Gen,Func}Decl File]
+ switch decl := path[n-2].(type) {
+ case *ast.GenDecl:
+ if decl.Tok == token.VAR && n >= 3 {
+ // Package-level 'var' initializer.
+ return pkg.init
+ }
+
+ case *ast.FuncDecl:
+ if decl.Recv == nil && decl.Name.Name == "init" {
+ // Explicit init() function.
+ for _, b := range pkg.init.Blocks {
+ for _, instr := range b.Instrs {
+ if instr, ok := instr.(*Call); ok {
+ if callee, ok := instr.Call.Value.(*Function); ok && callee.Pkg == pkg && callee.Pos() == decl.Name.NamePos {
+ return callee
+ }
+ }
+ }
+ }
+ // Hack: return non-nil when SSA is not yet
+ // built so that HasEnclosingFunction works.
+ return pkg.init
+ }
+ // Declared function/method.
+ return findNamedFunc(pkg, decl.Name.NamePos)
+ }
+ }
+ return nil // not in any function
+}
+
+// findNamedFunc returns the named function whose FuncDecl.Ident is at
+// position pos.
+//
+func findNamedFunc(pkg *Package, pos token.Pos) *Function {
+ // Look at all package members and method sets of named types.
+ // Not very efficient.
+ for _, mem := range pkg.Members {
+ switch mem := mem.(type) {
+ case *Function:
+ if mem.Pos() == pos {
+ return mem
+ }
+ case *Type:
+ mset := pkg.Prog.MethodSets.MethodSet(types.NewPointer(mem.Type()))
+ for i, n := 0, mset.Len(); i < n; i++ {
+ // Don't call Program.Method: avoid creating wrappers.
+ obj := mset.At(i).Obj().(*types.Func)
+ if obj.Pos() == pos {
+ return pkg.values[obj].(*Function)
+ }
+ }
+ }
+ }
+ return nil
+}
+
+// ValueForExpr returns the SSA Value that corresponds to non-constant
+// expression e.
+//
+// It returns nil if no value was found, e.g.
+// - the expression is not lexically contained within f;
+// - f was not built with debug information; or
+// - e is a constant expression. (For efficiency, no debug
+// information is stored for constants. Use
+// loader.PackageInfo.ValueOf(e) instead.)
+// - e is a reference to nil or a built-in function.
+// - the value was optimised away.
+//
+// If e is an addressable expression used in an lvalue context,
+// value is the address denoted by e, and isAddr is true.
+//
+// The types of e (or &e, if isAddr) and the result are equal
+// (modulo "untyped" bools resulting from comparisons).
+//
+// (Tip: to find the ssa.Value given a source position, use
+// importer.PathEnclosingInterval to locate the ast.Node, then
+// EnclosingFunction to locate the Function, then ValueForExpr to find
+// the ssa.Value.)
+//
+func (f *Function) ValueForExpr(e ast.Expr) (value Value, isAddr bool) {
+ if f.debugInfo() { // (opt)
+ e = unparen(e)
+ for _, b := range f.Blocks {
+ for _, instr := range b.Instrs {
+ if ref, ok := instr.(*DebugRef); ok {
+ if ref.Expr == e {
+ return ref.X, ref.IsAddr
+ }
+ }
+ }
+ }
+ }
+ return
+}
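
A hedged sketch, not part of the patch, of the position-to-Value recipe in the comment above (valueAt is a hypothetical helper; the package must have been built with SetDebugMode(true) so that DebugRef instructions exist).

// valueAt maps a source position to an ssa.Value:
// position -> ast.Expr -> enclosing *ssa.Function -> ValueForExpr.
func valueAt(f *ast.File, pos token.Pos, pkg *ssa.Package) (ssa.Value, bool) {
	path, _ := astutil.PathEnclosingInterval(f, pos, pos)
	fn := ssa.EnclosingFunction(pkg, path)
	if fn == nil || len(path) == 0 {
		return nil, false
	}
	e, ok := path[0].(ast.Expr)
	if !ok {
		return nil, false
	}
	return fn.ValueForExpr(e) // (value, isAddr); value may be nil
}
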
+
+// --- Lookup functions for source-level named entities (types.Objects) ---
+
+// Package returns the SSA Package corresponding to the specified
+// type-checker package object.
+// It returns nil if no such SSA package has been created.
+//
+func (prog *Program) Package(obj *types.Package) *Package {
+ return prog.packages[obj]
+}
+
+// packageLevelValue returns the package-level value corresponding to
+// the specified named object, which may be a package-level const
+// (*Const), var (*Global) or func (*Function) of some package in
+// prog. It returns nil if the object is not found.
+//
+func (prog *Program) packageLevelValue(obj types.Object) Value {
+ if pkg, ok := prog.packages[obj.Pkg()]; ok {
+ return pkg.values[obj]
+ }
+ return nil
+}
+
+// FuncValue returns the concrete Function denoted by the source-level
+// named function obj, or nil if obj denotes an interface method.
+//
+// TODO(adonovan): check the invariant that obj.Type() matches the
+// result's Signature, both in the params/results and in the receiver.
+//
+func (prog *Program) FuncValue(obj *types.Func) *Function {
+ fn, _ := prog.packageLevelValue(obj).(*Function)
+ return fn
+}
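
A hedged sketch, not part of the patch: resolving a type-checker function object to its SSA function (describeFunc is a hypothetical helper; the nil result for interface methods matches the comment above).

// describeFunc prints the SSA function for obj, if one exists.
func describeFunc(prog *ssa.Program, obj *types.Func) {
	if fn := prog.FuncValue(obj); fn != nil {
		fmt.Println("SSA function:", fn, "signature:", fn.Signature)
	} else {
		fmt.Println(obj, "has no concrete SSA function (e.g. an interface method)")
	}
}
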
+
+// ConstValue returns the SSA Value denoted by the source-level named
+// constant obj.
+//
+func (prog *Program) ConstValue(obj *types.Const) *Const {
+ // TODO(adonovan): opt: share (don't reallocate)
+ // Consts for const objects and constant ast.Exprs.
+
+ // Universal constant? {true,false,nil}
+ if obj.Parent() == types.Universe {
+ return NewConst(obj.Val(), obj.Type())
+ }
+ // Package-level named constant?
+ if v := prog.packageLevelValue(obj); v != nil {
+ return v.(*Const)
+ }
+ return NewConst(obj.Val(), obj.Type())
+}
+
+// VarValue returns the SSA Value that corresponds to a specific
+// identifier denoting the source-level named variable obj.
+//
+// VarValue returns nil if a local variable was not found, perhaps
+// because its package was not built, the debug information was not
+// requested during SSA construction, or the value was optimized away.
+//
+// ref is the path to an ast.Ident (e.g. from PathEnclosingInterval),
+// and that ident must resolve to obj.
+//
+// pkg is the package enclosing the reference. (A reference to a var
+// always occurs within a function, so we need to know where to find it.)
+//
+// If the identifier is a field selector and its base expression is
+// non-addressable, then VarValue returns the value of that field.
+// For example:
+// func f() struct {x int}
+// f().x // VarValue(x) returns a *Field instruction of type int
+//
+// All other identifiers denote addressable locations (variables).
+// For them, VarValue may return either the variable's address or its
+// value, even when the expression is evaluated only for its value; the
+// situation is reported by isAddr, the second component of the result.
+//
+// If !isAddr, the returned value is the one associated with the
+// specific identifier. For example,
+// var x int // VarValue(x) returns Const 0 here
+// x = 1 // VarValue(x) returns Const 1 here
+//
+// It is not specified whether the value or the address is returned in
+// any particular case, as it may depend upon optimizations performed
+// during SSA code generation, such as registerization, constant
+// folding, avoidance of materialization of subexpressions, etc.
+//
+func (prog *Program) VarValue(obj *types.Var, pkg *Package, ref []ast.Node) (value Value, isAddr bool) {
+ // All references to a var are local to some function, possibly init.
+ fn := EnclosingFunction(pkg, ref)
+ if fn == nil {
+ return // e.g. def of struct field; SSA not built?
+ }
+
+ id := ref[0].(*ast.Ident)
+
+ // Defining ident of a parameter?
+ if id.Pos() == obj.Pos() {
+ for _, param := range fn.Params {
+ if param.Object() == obj {
+ return param, false
+ }
+ }
+ }
+
+ // Other ident?
+ for _, b := range fn.Blocks {
+ for _, instr := range b.Instrs {
+ if dr, ok := instr.(*DebugRef); ok {
+ if dr.Pos() == id.Pos() {
+ return dr.X, dr.IsAddr
+ }
+ }
+ }
+ }
+
+ // Defining ident of package-level var?
+ if v := prog.packageLevelValue(obj); v != nil {
+ return v.(*Global), true
+ }
+
+ return // e.g. debug info not requested, or var optimized away
+}
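
A hedged sketch, not part of the patch: a hypothetical helper around VarValue; obj, pkg and ref would come from the type-checker and PathEnclosingInterval, as in the tests that follow.

// describeVar prints the SSA value or address currently associated
// with one identifier that refers to the variable obj.
func describeVar(prog *ssa.Program, obj *types.Var, pkg *ssa.Package, ref []ast.Node) {
	if v, isAddr := prog.VarValue(obj, pkg, ref); v != nil {
		kind := "value"
		if isAddr {
			kind = "address"
		}
		fmt.Printf("%s of %s: %s : %s\n", kind, obj.Name(), v.Name(), v.Type())
	}
}
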
diff --git a/llgo/third_party/go.tools/go/ssa/source_test.go b/llgo/third_party/go.tools/go/ssa/source_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..5b1cb79cd862a2501de090b68bd117522e8728c7
--- /dev/null
+++ b/llgo/third_party/go.tools/go/ssa/source_test.go
@@ -0,0 +1,276 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa_test
+
+// This file defines tests of source-level debugging utilities.
+
+import (
+ "fmt"
+ "go/ast"
+ "go/parser"
+ "go/token"
+ "os"
+ "regexp"
+ "strings"
+ "testing"
+
+ "llvm.org/llgo/third_party/go.tools/astutil"
+ "llvm.org/llgo/third_party/go.tools/go/exact"
+ "llvm.org/llgo/third_party/go.tools/go/loader"
+ "llvm.org/llgo/third_party/go.tools/go/ssa"
+ "llvm.org/llgo/third_party/go.tools/go/types"
+)
+
+func TestObjValueLookup(t *testing.T) {
+ conf := loader.Config{ParserMode: parser.ParseComments}
+ f, err := conf.ParseFile("testdata/objlookup.go", nil)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ conf.CreateFromFiles("main", f)
+
+ // Maps each var Ident (represented "name:linenum") to the
+ // kind of ssa.Value we expect (represented "Constant", "&Alloc").
+ expectations := make(map[string]string)
+
+ // Find all annotations of form x::BinOp, &y::Alloc, etc.
+ re := regexp.MustCompile(`(\b|&)?(\w*)::(\w*)\b`)
+ for _, c := range f.Comments {
+ text := c.Text()
+ pos := conf.Fset.Position(c.Pos())
+ for _, m := range re.FindAllStringSubmatch(text, -1) {
+ key := fmt.Sprintf("%s:%d", m[2], pos.Line)
+ value := m[1] + m[3]
+ expectations[key] = value
+ }
+ }
+
+ iprog, err := conf.Load()
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ prog := ssa.Create(iprog, 0 /*|ssa.PrintFunctions*/)
+ mainInfo := iprog.Created[0]
+ mainPkg := prog.Package(mainInfo.Pkg)
+ mainPkg.SetDebugMode(true)
+ mainPkg.Build()
+
+ var varIds []*ast.Ident
+ var varObjs []*types.Var
+ for id, obj := range mainInfo.Defs {
+ // Check invariants for func and const objects.
+ switch obj := obj.(type) {
+ case *types.Func:
+ checkFuncValue(t, prog, obj)
+
+ case *types.Const:
+ checkConstValue(t, prog, obj)
+
+ case *types.Var:
+ if id.Name == "_" {
+ continue
+ }
+ varIds = append(varIds, id)
+ varObjs = append(varObjs, obj)
+ }
+ }
+ for id, obj := range mainInfo.Uses {
+ if obj, ok := obj.(*types.Var); ok {
+ varIds = append(varIds, id)
+ varObjs = append(varObjs, obj)
+ }
+ }
+
+ // Check invariants for var objects.
+ // The result varies based on the specific Ident.
+ for i, id := range varIds {
+ obj := varObjs[i]
+ ref, _ := astutil.PathEnclosingInterval(f, id.Pos(), id.Pos())
+ pos := prog.Fset.Position(id.Pos())
+ exp := expectations[fmt.Sprintf("%s:%d", id.Name, pos.Line)]
+ if exp == "" {
+ t.Errorf("%s: no expectation for var ident %s ", pos, id.Name)
+ continue
+ }
+ wantAddr := false
+ if exp[0] == '&' {
+ wantAddr = true
+ exp = exp[1:]
+ }
+ checkVarValue(t, prog, mainPkg, ref, obj, exp, wantAddr)
+ }
+}
+
+func checkFuncValue(t *testing.T, prog *ssa.Program, obj *types.Func) {
+ fn := prog.FuncValue(obj)
+ // fmt.Printf("FuncValue(%s) = %s\n", obj, fn) // debugging
+ if fn == nil {
+ if obj.Name() != "interfaceMethod" {
+ t.Errorf("FuncValue(%s) == nil", obj)
+ }
+ return
+ }
+ if fnobj := fn.Object(); fnobj != obj {
+ t.Errorf("FuncValue(%s).Object() == %s; value was %s",
+ obj, fnobj, fn.Name())
+ return
+ }
+ if !types.Identical(fn.Type(), obj.Type()) {
+ t.Errorf("FuncValue(%s).Type() == %s", obj, fn.Type())
+ return
+ }
+}
+
+func checkConstValue(t *testing.T, prog *ssa.Program, obj *types.Const) {
+ c := prog.ConstValue(obj)
+ // fmt.Printf("ConstValue(%s) = %s\n", obj, c) // debugging
+ if c == nil {
+ t.Errorf("ConstValue(%s) == nil", obj)
+ return
+ }
+ if !types.Identical(c.Type(), obj.Type()) {
+ t.Errorf("ConstValue(%s).Type() == %s", obj, c.Type())
+ return
+ }
+ if obj.Name() != "nil" {
+ if !exact.Compare(c.Value, token.EQL, obj.Val()) {
+ t.Errorf("ConstValue(%s).Value (%s) != %s",
+ obj, c.Value, obj.Val())
+ return
+ }
+ }
+}
+
+func checkVarValue(t *testing.T, prog *ssa.Program, pkg *ssa.Package, ref []ast.Node, obj *types.Var, expKind string, wantAddr bool) {
+ // The prefix of all assertions messages.
+ prefix := fmt.Sprintf("VarValue(%s @ L%d)",
+ obj, prog.Fset.Position(ref[0].Pos()).Line)
+
+ v, gotAddr := prog.VarValue(obj, pkg, ref)
+
+ // Kind is the concrete type of the ssa Value.
+ gotKind := "nil"
+ if v != nil {
+ gotKind = fmt.Sprintf("%T", v)[len("*ssa."):]
+ }
+
+ // fmt.Printf("%s = %v (kind %q; expect %q) wantAddr=%t gotAddr=%t\n", prefix, v, gotKind, expKind, wantAddr, gotAddr) // debugging
+
+ // Check the kinds match.
+ // "nil" indicates expected failure (e.g. optimized away).
+ if expKind != gotKind {
+ t.Errorf("%s concrete type == %s, want %s", prefix, gotKind, expKind)
+ }
+
+ // Check the types match.
+ // If wantAddr, the expected type is the object's address.
+ if v != nil {
+ expType := obj.Type()
+ if wantAddr {
+ expType = types.NewPointer(expType)
+ if !gotAddr {
+ t.Errorf("%s: got value, want address", prefix)
+ }
+ } else if gotAddr {
+ t.Errorf("%s: got address, want value", prefix)
+ }
+ if !types.Identical(v.Type(), expType) {
+ t.Errorf("%s.Type() == %s, want %s", prefix, v.Type(), expType)
+ }
+ }
+}
+
+// Ensure that, in debug mode, we can determine the ssa.Value
+// corresponding to every ast.Expr.
+func TestValueForExpr(t *testing.T) {
+ conf := loader.Config{ParserMode: parser.ParseComments}
+ f, err := conf.ParseFile("testdata/valueforexpr.go", nil)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ conf.CreateFromFiles("main", f)
+
+ iprog, err := conf.Load()
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ mainInfo := iprog.Created[0]
+
+ prog := ssa.Create(iprog, 0)
+ mainPkg := prog.Package(mainInfo.Pkg)
+ mainPkg.SetDebugMode(true)
+ mainPkg.Build()
+
+ if false {
+ // debugging
+ for _, mem := range mainPkg.Members {
+ if fn, ok := mem.(*ssa.Function); ok {
+ fn.WriteTo(os.Stderr)
+ }
+ }
+ }
+
+ // Find the actual AST node for each canonical position.
+ parenExprByPos := make(map[token.Pos]*ast.ParenExpr)
+ ast.Inspect(f, func(n ast.Node) bool {
+ if n != nil {
+ if e, ok := n.(*ast.ParenExpr); ok {
+ parenExprByPos[e.Pos()] = e
+ }
+ }
+ return true
+ })
+
+ // Find all annotations of form /*@kind*/.
+ for _, c := range f.Comments {
+ text := strings.TrimSpace(c.Text())
+ if text == "" || text[0] != '@' {
+ continue
+ }
+ text = text[1:]
+ pos := c.End() + 1
+ position := prog.Fset.Position(pos)
+ var e ast.Expr
+ if target := parenExprByPos[pos]; target == nil {
+ t.Errorf("%s: annotation doesn't precede ParenExpr: %q", position, text)
+ continue
+ } else {
+ e = target.X
+ }
+
+ path, _ := astutil.PathEnclosingInterval(f, pos, pos)
+ if path == nil {
+ t.Errorf("%s: can't find AST path from root to comment: %s", position, text)
+ continue
+ }
+
+ fn := ssa.EnclosingFunction(mainPkg, path)
+ if fn == nil {
+ t.Errorf("%s: can't find enclosing function", position)
+ continue
+ }
+
+ v, gotAddr := fn.ValueForExpr(e) // (may be nil)
+ got := strings.TrimPrefix(fmt.Sprintf("%T", v), "*ssa.")
+ if want := text; got != want {
+ t.Errorf("%s: got value %q, want %q", position, got, want)
+ }
+ if v != nil {
+ T := v.Type()
+ if gotAddr {
+ T = T.Underlying().(*types.Pointer).Elem() // deref
+ }
+ if !types.Identical(T, mainInfo.TypeOf(e)) {
+ t.Errorf("%s: got type %s, want %s", position, mainInfo.TypeOf(e), T)
+ }
+ }
+ }
+}
diff --git a/llgo/third_party/go.tools/go/ssa/ssa.go b/llgo/third_party/go.tools/go/ssa/ssa.go
new file mode 100644
index 0000000000000000000000000000000000000000..387ea5f30ae71db400bd4463122855d51d542adf
--- /dev/null
+++ b/llgo/third_party/go.tools/go/ssa/ssa.go
@@ -0,0 +1,1688 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// This package defines a high-level intermediate representation for
+// Go programs using static single-assignment (SSA) form.
+
+import (
+ "fmt"
+ "go/ast"
+ "go/token"
+ "sync"
+
+ "llvm.org/llgo/third_party/go.tools/go/exact"
+ "llvm.org/llgo/third_party/go.tools/go/loader"
+ "llvm.org/llgo/third_party/go.tools/go/types"
+ "llvm.org/llgo/third_party/go.tools/go/types/typeutil"
+)
+
+// A Program is a partial or complete Go program converted to SSA form.
+//
+type Program struct {
+ Fset *token.FileSet // position information for the files of this Program
+ imported map[string]*Package // all importable Packages, keyed by import path
+ packages map[*types.Package]*Package // all loaded Packages, keyed by object
+ mode BuilderMode // set of mode bits for SSA construction
+ MethodSets types.MethodSetCache // cache of type-checker's method-sets
+
+ methodsMu sync.Mutex // guards the following maps:
+ methodSets typeutil.Map // maps type to its concrete methodSet
+ bounds map[*types.Func]*Function // bounds for curried x.Method closures
+ thunks map[selectionKey]*Function // thunks for T.Method expressions
+}
+
+// A Package is a single analyzed Go package containing Members for
+// all package-level functions, variables, constants and types it
+// declares. These may be accessed directly via Members, or via the
+// type-specific accessor methods Func, Type, Var and Const.
+//
+type Package struct {
+ Prog *Program // the owning program
+ Object *types.Package // the type checker's package object for this package
+ Members map[string]Member // all package members keyed by name
+ methodsMu sync.Mutex // guards needRTTI and methodSets
+ methodSets []types.Type // types whose method sets are included in this package
+ values map[types.Object]Value // package members (incl. types and methods), keyed by object
+ init *Function // Func("init"); the package's init function
+ debug bool // include full debug info in this package.
+
+ // The following fields are set transiently, then cleared
+ // after building.
+ started int32 // atomically tested and set at start of build phase
+ ninit int32 // number of init functions
+ info *loader.PackageInfo // package ASTs and type information
+ needRTTI typeutil.Map // types for which runtime type info is needed
+}
+
+// A Member is a member of a Go package, implemented by *NamedConst,
+// *Global, *Function, or *Type; they are created by package-level
+// const, var, func and type declarations respectively.
+//
+type Member interface {
+ Name() string // declared name of the package member
+ String() string // package-qualified name of the package member
+ RelString(*types.Package) string // like String, but relative refs are unqualified
+ Object() types.Object // typechecker's object for this member, if any
+ Pos() token.Pos // position of member's declaration, if known
+ Type() types.Type // type of the package member
+ Token() token.Token // token.{VAR,FUNC,CONST,TYPE}
+ Package() *Package // returns the containing package. (TODO: rename Pkg)
+}
+
+// A Type is a Member of a Package representing a package-level named type.
+//
+// Type() returns a *types.Named.
+//
+type Type struct {
+ object *types.TypeName
+ pkg *Package
+}
+
+// A NamedConst is a Member of Package representing a package-level
+// named constant value.
+//
+// Pos() returns the position of the declaring ast.ValueSpec.Names[*]
+// identifier.
+//
+// NB: a NamedConst is not a Value; it contains a constant Value, which
+// it augments with the name and position of its 'const' declaration.
+//
+type NamedConst struct {
+ object *types.Const
+ Value *Const
+ pos token.Pos
+ pkg *Package
+}
+
+// A Value is an SSA value that can be referenced by an instruction.
+type Value interface {
+ // Name returns the name of this value, and determines how
+ // this Value appears when used as an operand of an
+ // Instruction.
+ //
+ // This is the same as the source name for Parameters,
+ // Builtins, Functions, FreeVars, Globals.
+ // For constants, it is a representation of the constant's value
+ // and type. For all other Values this is the name of the
+ // virtual register defined by the instruction.
+ //
+ // The name of an SSA Value is not semantically significant,
+ // and may not even be unique within a function.
+ Name() string
+
+ // If this value is an Instruction, String returns its
+ // disassembled form; otherwise it returns unspecified
+ // human-readable information about the Value, such as its
+ // kind, name and type.
+ String() string
+
+ // Type returns the type of this value. Many instructions
+ // (e.g. IndexAddr) change their behaviour depending on the
+ // types of their operands.
+ Type() types.Type
+
+ // Parent returns the function to which this Value belongs.
+ // It returns nil for named Functions, Builtin, Const and Global.
+ Parent() *Function
+
+ // Referrers returns the list of instructions that have this
+ // value as one of their operands; it may contain duplicates
+ // if an instruction has a repeated operand.
+ //
+ // Referrers actually returns a pointer through which the
+ // caller may perform mutations to the object's state.
+ //
+ // Referrers is currently only defined if Parent()!=nil,
+ // i.e. for the function-local values FreeVar, Parameter,
+ // Functions (iff anonymous) and all value-defining instructions.
+ // It returns nil for named Functions, Builtin, Const and Global.
+ //
+ // Instruction.Operands contains the inverse of this relation.
+ Referrers() *[]Instruction
+
+ // Pos returns the location of the AST token most closely
+ // associated with the operation that gave rise to this value,
+ // or token.NoPos if it was not explicit in the source.
+ //
+ // For each ast.Node type, a particular token is designated as
+ // the closest location for the expression, e.g. the Lparen
+ // for an *ast.CallExpr. This permits a compact but
+ // approximate mapping from Values to source positions for use
+ // in diagnostic messages, for example.
+ //
+ // (Do not use this position to determine which Value
+ // corresponds to an ast.Expr; use Function.ValueForExpr
+ // instead. NB: it requires that the function was built with
+ // debug information.)
+ //
+ Pos() token.Pos
+}
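
A hedged sketch, not part of the patch: enumerating the uses of a value via Referrers, respecting the nil cases documented above (dumpUses is a hypothetical helper).

// dumpUses prints every instruction that uses v. Referrers() is nil
// for Globals, Consts, Builtins and named Functions, so check it.
func dumpUses(v ssa.Value) {
	if refs := v.Referrers(); refs != nil {
		for _, instr := range *refs {
			fmt.Printf("%s is used by: %s\n", v.Name(), instr)
		}
	}
}
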
+
+// An Instruction is an SSA instruction that computes a new Value or
+// has some effect.
+//
+// An Instruction that defines a value (e.g. BinOp) also implements
+// the Value interface; an Instruction that only has an effect (e.g. Store)
+// does not.
+//
+type Instruction interface {
+ // String returns the disassembled form of this value. e.g.
+ //
+ // Examples of Instructions that define a Value:
+ // e.g. "x + y" (BinOp)
+ // "len([])" (Call)
+ // Note that the name of the Value is not printed.
+ //
+ // Examples of Instructions that do not define (are not) Values:
+ // e.g. "return x" (Return)
+ // "*y = x" (Store)
+ //
+ // (This separation is useful for some analyses which
+ // distinguish the operation from the value it
+ // defines. e.g. 'y = local int' is both an allocation of
+ // memory 'local int' and a definition of a pointer y.)
+ String() string
+
+ // Parent returns the function to which this instruction
+ // belongs.
+ Parent() *Function
+
+ // Block returns the basic block to which this instruction
+ // belongs.
+ Block() *BasicBlock
+
+ // setBlock sets the basic block to which this instruction belongs.
+ setBlock(*BasicBlock)
+
+ // Operands returns the operands of this instruction: the
+ // set of Values it references.
+ //
+ // Specifically, it appends their addresses to rands, a
+ // user-provided slice, and returns the resulting slice,
+ // permitting avoidance of memory allocation.
+ //
+ // The operands are appended in undefined order, but the order
+ // is consistent for a given Instruction; the addresses are
+ // always non-nil but may point to a nil Value. Clients may
+ // store through the pointers, e.g. to effect a value
+ // renaming.
+ //
+ // Value.Referrers is a subset of the inverse of this
+ // relation. (Referrers are not tracked for all types of
+ // Values.)
+ Operands(rands []*Value) []*Value
+
+ // Pos returns the location of the AST token most closely
+ // associated with the operation that gave rise to this
+ // instruction, or token.NoPos if it was not explicit in the
+ // source.
+ //
+ // For each ast.Node type, a particular token is designated as
+ // the closest location for the expression, e.g. the Go token
+ // for an *ast.GoStmt. This permits a compact but approximate
+ // mapping from Instructions to source positions for use in
+ // diagnostic messages, for example.
+ //
+ // (Do not use this position to determine which Instruction
+ // corresponds to an ast.Expr; see the notes for Value.Pos.
+ // This position may be used to determine which non-Value
+ // Instruction corresponds to some ast.Stmts, but not all: If
+ // and Jump instructions have no Pos(), for example.)
+ //
+ Pos() token.Pos
+}
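
A hedged sketch, not part of the patch: the scratch-slice pattern Operands is designed for, the same pattern the sanity checker earlier in this patch uses (dumpOperands is a hypothetical helper).

// dumpOperands prints an instruction's operands, reusing a scratch
// slice so the call allocates no memory.
func dumpOperands(instr ssa.Instruction) {
	var rands [8]*ssa.Value // reusable storage
	for i, op := range instr.Operands(rands[:0]) {
		if *op != nil { // pointers are non-nil but may point to a nil Value
			fmt.Printf("operand %d: %s\n", i, (*op).Name())
		}
	}
}
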
+
+// A Node is a node in the SSA value graph. Every concrete type that
+// implements Node is also either a Value, an Instruction, or both.
+//
+// Node contains the methods common to Value and Instruction, plus the
+// Operands and Referrers methods generalized to return nil for
+// non-Instructions and non-Values, respectively.
+//
+// Node is provided to simplify SSA graph algorithms. Clients should
+// use the more specific and informative Value or Instruction
+// interfaces where appropriate.
+//
+type Node interface {
+ // Common methods:
+ String() string
+ Pos() token.Pos
+ Parent() *Function
+
+ // Partial methods:
+ Operands(rands []*Value) []*Value // nil for non-Instructions
+ Referrers() *[]Instruction // nil for non-Values
+}
+
+// Function represents the parameters, results and code of a function
+// or method.
+//
+// If Blocks is nil, this indicates an external function for which no
+// Go source code is available. In this case, FreeVars and Locals
+// will be nil too. Clients performing whole-program analysis must
+// handle external functions specially.
+//
+// Blocks contains the function's control-flow graph (CFG).
+// Blocks[0] is the function entry point; block order is not otherwise
+// semantically significant, though it may affect the readability of
+// the disassembly.
+// To iterate over the blocks in dominance order, use DomPreorder().
+//
+// Recover is an optional second entry point to which control resumes
+// after a recovered panic. The Recover block may contain only a return
+// statement, preceded by a load of the function's named return
+// parameters, if any.
+//
+// A nested function (Parent()!=nil) that refers to one or more
+// lexically enclosing local variables ("free variables") has FreeVar
+// parameters. Such functions cannot be called directly but require a
+// value created by MakeClosure which, via its Bindings, supplies
+// values for these parameters.
+//
+// If the function is a method (Signature.Recv() != nil) then the first
+// element of Params is the receiver parameter.
+//
+// Pos() returns the declaring ast.FuncLit.Type.Func or the position
+// of the ast.FuncDecl.Name, if the function was explicit in the
+// source. Synthetic wrappers, for which Synthetic != "", may share
+// the same position as the function they wrap.
+// Syntax.Pos() always returns the position of the declaring "func" token.
+//
+// Type() returns the function's Signature.
+//
+type Function struct {
+ name string
+ object types.Object // a declared *types.Func or one of its wrappers
+ method *types.Selection // info about provenance of synthetic methods
+ Signature *types.Signature
+ pos token.Pos
+
+ Synthetic string // provenance of synthetic function; "" for true source functions
+ syntax ast.Node // *ast.Func{Decl,Lit}; replaced with simple ast.Node after build, unless debug mode
+ parent *Function // enclosing function if anon; nil if global
+ Pkg *Package // enclosing package; nil for shared funcs (wrappers and error.Error)
+ Prog *Program // enclosing program
+ Params []*Parameter // function parameters; for methods, includes receiver
+ FreeVars []*FreeVar // free variables whose values must be supplied by closure
+ Locals []*Alloc // local variables of this function
+ Blocks []*BasicBlock // basic blocks of the function; nil => external
+ Recover *BasicBlock // optional; control transfers here after recovered panic
+ AnonFuncs []*Function // anonymous functions directly beneath this one
+ referrers []Instruction // referring instructions (iff Parent() != nil)
+
+ // The following fields are set transiently during building,
+ // then cleared.
+ currentBlock *BasicBlock // where to emit code
+ objects map[types.Object]Value // addresses of local variables
+ namedResults []*Alloc // tuple of named results
+ targets *targets // linked stack of branch targets
+ lblocks map[*ast.Object]*lblock // labelled blocks
+}
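
A hedged sketch, not part of the patch: visiting every instruction of a function while handling external (body-less) functions specially, as the comment above advises (dumpInstrs is a hypothetical helper).

// dumpInstrs prints each instruction of fn, block by block.
func dumpInstrs(fn *ssa.Function) {
	if fn.Blocks == nil {
		fmt.Println(fn, "is external: no SSA body")
		return
	}
	for _, b := range fn.Blocks {
		for _, instr := range b.Instrs {
			fmt.Printf("%s:\t%s\n", b, instr)
		}
	}
}
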
+
+// An SSA basic block.
+//
+// The final element of Instrs is always an explicit transfer of
+// control (If, Jump, Return or Panic).
+//
+// A block may contain no Instructions only if it is unreachable,
+// i.e. Preds is nil. Empty blocks are typically pruned.
+//
+// BasicBlocks and their Preds/Succs relation form a (possibly cyclic)
+// graph independent of the SSA Value graph: the control-flow graph or
+// CFG. It is illegal for multiple edges to exist between the same
+// pair of blocks.
+//
+// Each BasicBlock is also a node in the dominator tree of the CFG.
+// The tree may be navigated using Idom()/Dominees() and queried using
+// Dominates().
+//
+// The order of Preds and Succs is significant (to Phi and If
+// instructions, respectively).
+//
+type BasicBlock struct {
+ Index int // index of this block within Parent().Blocks
+ Comment string // optional label; no semantic significance
+ parent *Function // parent function
+ Instrs []Instruction // instructions in order
+ Preds, Succs []*BasicBlock // predecessors and successors
+ succs2 [2]*BasicBlock // initial space for Succs.
+ dom domInfo // dominator tree info
+ gaps int // number of nil Instrs (transient).
+ rundefers int // number of rundefers (transient)
+}
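
A hedged sketch, not part of the patch: walking the dominator tree via the Idom()/Dominees() navigation mentioned above (dumpDomTree is a hypothetical helper; Dominees is assumed to be provided by this package's dominator-tree code).

// dumpDomTree prints the dominator tree of fn, indented by depth,
// starting from the entry block.
func dumpDomTree(fn *ssa.Function) {
	var walk func(b *ssa.BasicBlock, depth int)
	walk = func(b *ssa.BasicBlock, depth int) {
		fmt.Printf("%*s%s\n", 2*depth, "", b)
		for _, child := range b.Dominees() {
			walk(child, depth+1)
		}
	}
	if len(fn.Blocks) > 0 {
		walk(fn.Blocks[0], 0)
	}
}
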
+
+// Pure values ----------------------------------------
+
+// A FreeVar represents a free variable of the function to which it
+// belongs.
+//
+// FreeVars are used to implement anonymous functions, whose free
+// variables are lexically captured in a closure formed by
+// MakeClosure. The value of such a free var is an Alloc or another
+// FreeVar and is considered a potentially escaping heap address, with
+// pointer type.
+//
+// FreeVars are also used to implement bound method closures. Such a
+// free var represents the receiver value and may be of any type that
+// has concrete methods.
+//
+// Pos() returns the position of the value that was captured, which
+// belongs to an enclosing function.
+//
+type FreeVar struct {
+ name string
+ typ types.Type
+ pos token.Pos
+ parent *Function
+ referrers []Instruction
+
+ // Transiently needed during building.
+ outer Value // the Value captured from the enclosing context.
+}
+
+// A Parameter represents an input parameter of a function.
+//
+type Parameter struct {
+ name string
+ object types.Object // a *types.Var; nil for non-source locals
+ typ types.Type
+ pos token.Pos
+ parent *Function
+ referrers []Instruction
+}
+
+// A Const represents the value of a constant expression.
+//
+// The underlying type of a constant may be any boolean, numeric, or
+// string type. In addition, a Const may represent the nil value of
+// any reference type: interface, map, channel, pointer, slice, or
+// function---but not "untyped nil".
+//
+// All source-level constant expressions are represented by a Const
+// of equal type and value.
+//
+// Value holds the exact value of the constant, independent of its
+// Type(), using the same representation as package go/exact uses for
+// constants, or nil for a typed nil value.
+//
+// Pos() returns token.NoPos.
+//
+// Example printed form:
+// 42:int
+// "hello":untyped string
+// 3+4i:MyComplex
+//
+type Const struct {
+ typ types.Type
+ Value exact.Value
+}
+
+// A Global is a named Value holding the address of a package-level
+// variable.
+//
+// Pos() returns the position of the ast.ValueSpec.Names[*]
+// identifier.
+//
+type Global struct {
+ name string
+ object types.Object // a *types.Var; may be nil for synthetics e.g. init$guard
+ typ types.Type
+ pos token.Pos
+
+ Pkg *Package
+}
+
+// A Builtin represents a specific use of a built-in function, e.g. len.
+//
+// Builtins are immutable values. Builtins do not have addresses.
+// Builtins can only appear in CallCommon.Func.
+//
+// Name() indicates the function: one of the built-in functions from the
+// Go spec (excluding "make" and "new") or one of these ssa-defined
+// intrinsics:
+//
+// // wrapnilchk returns ptr if non-nil, panics otherwise.
+// // (For use in indirection wrappers.)
+// func ssa:wrapnilchk(ptr *T, recvType, methodName string) *T
+//
+// Object() returns a *types.Builtin for built-ins defined by the spec,
+// nil for others.
+//
+// Type() returns a *types.Signature representing the effective
+// signature of the built-in for this call.
+//
+type Builtin struct {
+ name string
+ sig *types.Signature
+}
+
+// Value-defining instructions ----------------------------------------
+
+// The Alloc instruction reserves space for a value of the given type,
+// zero-initializes it, and yields its address.
+//
+// Alloc values are always addresses, and have pointer types, so the
+// type of the allocated space is actually indirect(Type()).
+//
+// If Heap is false, Alloc allocates space in the function's
+// activation record (frame); we refer to an Alloc(Heap=false) as a
+// "local" alloc. Each local Alloc returns the same address each time
+// it is executed within the same activation; the space is
+// re-initialized to zero.
+//
+// If Heap is true, Alloc allocates space in the heap; we
+// refer to an Alloc(Heap=true) as a "new" alloc. Each new Alloc
+// returns a different address each time it is executed.
+//
+// When Alloc is applied to a channel, map or slice type, it returns
+// the address of an uninitialized (nil) reference of that kind; store
+// the result of MakeSlice, MakeMap or MakeChan in that location to
+// instantiate these types.
+//
+// Pos() returns the ast.CompositeLit.Lbrace for a composite literal,
+// or the ast.CallExpr.Rparen for a call to new() or for a call that
+// allocates a varargs slice.
+//
+// Example printed form:
+// t0 = local int
+// t1 = new int
+//
+type Alloc struct {
+ register
+ Comment string
+ Heap bool
+ index int // dense numbering; for lifting
+}
+
+// The Phi instruction represents an SSA φ-node, which combines values
+// that differ across incoming control-flow edges and yields a new
+// value. Within a block, all φ-nodes must appear before all non-φ
+// nodes.
+//
+// Pos() returns the position of the && or || for short-circuit
+// control-flow joins, or that of the *Alloc for φ-nodes inserted
+// during SSA renaming.
+//
+// Example printed form:
+// t2 = phi [0.start: t0, 1.if.then: t1, ...]
+//
+type Phi struct {
+ register
+ Comment string // a hint as to its purpose
+ Edges []Value // Edges[i] is value for Block().Preds[i]
+}
+
+// The Call instruction represents a function or method call.
+//
+// The Call instruction yields the function result, if there is
+// exactly one, or a tuple (empty or len>1) whose components are
+// accessed via Extract.
+//
+// See CallCommon for generic function call documentation.
+//
+// Pos() returns the ast.CallExpr.Lparen, if explicit in the source.
+//
+// Example printed form:
+// t2 = println(t0, t1)
+// t4 = t3()
+// t7 = invoke t5.Println(...t6)
+//
+type Call struct {
+ register
+ Call CallCommon
+}
+
+// The BinOp instruction yields the result of binary operation X Op Y.
+//
+// Pos() returns the ast.BinaryExpr.OpPos, if explicit in the source.
+//
+// Example printed form:
+// t1 = t0 + 1:int
+//
+type BinOp struct {
+ register
+ // One of:
+ // ADD SUB MUL QUO REM + - * / %
+ // AND OR XOR SHL SHR AND_NOT & | ^ << >> &^
+ // EQL LSS GTR NEQ LEQ GEQ == < > != <= >=
+ Op token.Token
+ X, Y Value
+}
+
+// The UnOp instruction yields the result of Op X.
+// ARROW is channel receive.
+// MUL is pointer indirection (load).
+// XOR is bitwise complement.
+// SUB is negation.
+// NOT is logical negation.
+//
+// If CommaOk and Op=ARROW, the result is a 2-tuple of the value above
+// and a boolean indicating the success of the receive. The
+// components of the tuple are accessed using Extract.
+//
+// Pos() returns the ast.UnaryExpr.OpPos or ast.RangeStmt.TokPos (for
+// ranging over a channel), if explicit in the source.
+//
+// Example printed form:
+// t0 = *x
+// t2 = <-t1,ok
+//
+type UnOp struct {
+ register
+ Op token.Token // One of: NOT SUB ARROW MUL XOR ! - <- * ^
+ X Value
+ CommaOk bool
+}
+
+// The ChangeType instruction applies to X a value-preserving type
+// change to Type().
+//
+// Type changes are permitted:
+// - between a named type and its underlying type.
+// - between two named types of the same underlying type.
+// - between (possibly named) pointers to identical base types.
+// - from a bidirectional channel to a read- or write-channel,
+// optionally adding/removing a name.
+//
+// This operation cannot fail dynamically.
+//
+// Pos() returns the ast.CallExpr.Lparen, if the instruction arose
+// from an explicit conversion in the source.
+//
+// Example printed form:
+// t1 = changetype *int <- IntPtr (t0)
+//
+type ChangeType struct {
+ register
+ X Value
+}
+
+// The Convert instruction yields the conversion of value X to type
+// Type(). One or both of those types is basic (but possibly named).
+//
+// A conversion may change the value and representation of its operand.
+// Conversions are permitted:
+// - between real numeric types.
+// - between complex numeric types.
+// - between string and []byte or []rune.
+// - between pointers and unsafe.Pointer.
+// - between unsafe.Pointer and uintptr.
+// - from (Unicode) integer to (UTF-8) string.
+// A conversion may imply a type name change also.
+//
+// This operation cannot fail dynamically.
+//
+// Conversions of untyped string/number/bool constants to a specific
+// representation are eliminated during SSA construction.
+//
+// Pos() returns the ast.CallExpr.Lparen, if the instruction arose
+// from an explicit conversion in the source.
+//
+// Example printed form:
+// t1 = convert []byte <- string (t0)
+//
+type Convert struct {
+ register
+ X Value
+}
+
+// ChangeInterface constructs a value of one interface type from a
+// value of another interface type known to be assignable to it.
+// This operation cannot fail.
+//
+// Pos() returns the ast.CallExpr.Lparen if the instruction arose from
+// an explicit T(e) conversion; the ast.TypeAssertExpr.Lparen if the
+// instruction arose from an explicit e.(T) operation; or token.NoPos
+// otherwise.
+//
+// Example printed form:
+// t1 = change interface interface{} <- I (t0)
+//
+type ChangeInterface struct {
+ register
+ X Value
+}
+
+// MakeInterface constructs an instance of an interface type from a
+// value of a concrete type.
+//
+// Use Program.MethodSets.MethodSet(X.Type()) to find the method-set
+// of X, and Program.Method(m) to find the implementation of a method.
+//
+// To construct the zero value of an interface type T, use:
+// NewConst(exact.MakeNil(), T, pos)
+//
+// Pos() returns the ast.CallExpr.Lparen, if the instruction arose
+// from an explicit conversion in the source.
+//
+// Example printed form:
+// t1 = make interface{} <- int (42:int)
+// t2 = make Stringer <- t0
+//
+type MakeInterface struct {
+ register
+ X Value
+}
+
+// The MakeClosure instruction yields a closure value whose code is
+// Fn and whose free variables' values are supplied by Bindings.
+//
+// Type() returns a (possibly named) *types.Signature.
+//
+// Pos() returns the ast.FuncLit.Type.Func for a function literal
+// closure or the ast.SelectorExpr.Sel for a bound method closure.
+//
+// Example printed form:
+// t0 = make closure anon@1.2 [x y z]
+// t1 = make closure bound$(main.I).add [i]
+//
+type MakeClosure struct {
+ register
+ Fn Value // always a *Function
+ Bindings []Value // values for each free variable in Fn.FreeVars
+}
+
+// The MakeMap instruction creates a new hash-table-based map object
+// and yields a value of kind map.
+//
+// Type() returns a (possibly named) *types.Map.
+//
+// Pos() returns the ast.CallExpr.Lparen, if created by make(map), or
+// the ast.CompositeLit.Lbrace if created by a literal.
+//
+// Example printed form:
+// t1 = make map[string]int t0
+// t1 = make StringIntMap t0
+//
+type MakeMap struct {
+ register
+ Reserve Value // initial space reservation; nil => default
+}
+
+// The MakeChan instruction creates a new channel object and yields a
+// value of kind chan.
+//
+// Type() returns a (possibly named) *types.Chan.
+//
+// Pos() returns the ast.CallExpr.Lparen for the make(chan) that
+// created it.
+//
+// Example printed form:
+// t0 = make chan int 0
+// t0 = make IntChan 0
+//
+type MakeChan struct {
+ register
+ Size Value // int; size of buffer; zero => synchronous.
+}
+
+// The MakeSlice instruction yields a slice of length Len backed by a
+// newly allocated array of length Cap.
+//
+// Both Len and Cap must be non-nil Values of integer type.
+//
+// (Alloc(types.Array) followed by Slice will not suffice because
+// Alloc can only create arrays of constant length.)
+//
+// Type() returns a (possibly named) *types.Slice.
+//
+// Pos() returns the ast.CallExpr.Lparen for the make([]T) that
+// created it.
+//
+// Example printed form:
+// t1 = make []string 1:int t0
+// t1 = make StringSlice 1:int t0
+//
+type MakeSlice struct {
+ register
+ Len Value
+ Cap Value
+}
+
+// The Slice instruction yields a slice of an existing string, slice
+// or *array X between optional integer bounds Low and High.
+//
+// Dynamically, this instruction panics if X evaluates to a nil *array
+// pointer.
+//
+// Type() returns string if the type of X was string, otherwise a
+// *types.Slice with the same element type as X.
+//
+// Pos() returns the ast.SliceExpr.Lbrack if created by a x[:] slice
+// operation, the ast.CompositeLit.Lbrace if created by a literal, or
+// NoPos if not explicit in the source (e.g. a variadic argument slice).
+//
+// Example printed form:
+// t1 = slice t0[1:]
+//
+type Slice struct {
+ register
+ X Value // slice, string, or *array
+ Low, High, Max Value // each may be nil
+}
+
+// The FieldAddr instruction yields the address of Field of *struct X.
+//
+// The field is identified by its index within the field list of the
+// struct type of X.
+//
+// Dynamically, this instruction panics if X evaluates to a nil
+// pointer.
+//
+// Type() returns a (possibly named) *types.Pointer.
+//
+// Pos() returns the position of the ast.SelectorExpr.Sel for the
+// field, if explicit in the source.
+//
+// Example printed form:
+// t1 = &t0.name [#1]
+//
+type FieldAddr struct {
+ register
+ X Value // *struct
+ Field int // index into X.Type().Deref().(*types.Struct).Fields
+}
+
+// The Field instruction yields the Field of struct X.
+//
+// The field is identified by its index within the field list of the
+// struct type of X; by using numeric indices we avoid ambiguity of
+// package-local identifiers and permit compact representations.
+//
+// Pos() returns the position of the ast.SelectorExpr.Sel for the
+// field, if explicit in the source.
+//
+// Example printed form:
+// t1 = t0.name [#1]
+//
+type Field struct {
+ register
+ X Value // struct
+ Field int // index into X.Type().(*types.Struct).Fields
+}
+
+// The IndexAddr instruction yields the address of the element at
+// index Index of collection X. Index is an integer expression.
+//
+// The elements of maps and strings are not addressable; use Lookup or
+// MapUpdate instead.
+//
+// Dynamically, this instruction panics if X evaluates to a nil *array
+// pointer.
+//
+// Type() returns a (possibly named) *types.Pointer.
+//
+// Pos() returns the ast.IndexExpr.Lbrack for the index operation, if
+// explicit in the source.
+//
+// Example printed form:
+// t2 = &t0[t1]
+//
+type IndexAddr struct {
+ register
+ X Value // slice or *array,
+ Index Value // numeric index
+}
+
+// The Index instruction yields element Index of array X.
+//
+// Pos() returns the ast.IndexExpr.Lbrack for the index operation, if
+// explicit in the source.
+//
+// Example printed form:
+// t2 = t0[t1]
+//
+type Index struct {
+ register
+ X Value // array
+ Index Value // integer index
+}
+
+// The Lookup instruction yields element Index of collection X, a map
+// or string. Index is an integer expression if X is a string or the
+// appropriate key type if X is a map.
+//
+// If CommaOk, the result is a 2-tuple of the value above and a
+// boolean indicating the result of a map membership test for the key.
+// The components of the tuple are accessed using Extract.
+//
+// Pos() returns the ast.IndexExpr.Lbrack, if explicit in the source.
+//
+// Example printed form:
+// t2 = t0[t1]
+// t5 = t3[t4],ok
+//
+type Lookup struct {
+ register
+ X Value // string or map
+ Index Value // numeric or key-typed index
+ CommaOk bool // return a value,ok pair
+}
+
+// SelectState is a helper for Select.
+// It represents one goal state and its corresponding communication.
+//
+type SelectState struct {
+ Dir types.ChanDir // direction of case (SendOnly or RecvOnly)
+ Chan Value // channel to use (for send or receive)
+ Send Value // value to send (for send)
+ Pos token.Pos // position of token.ARROW
+ DebugNode ast.Node // ast.SendStmt or ast.UnaryExpr(<-) [debug mode]
+}
+
+// The Select instruction tests whether (or blocks until) one
+// of the specified sent or received states is entered.
+//
+// Let n be the number of States for which Dir==RECV and T_i (0<=i<n)
+// be the element type of each such state's Chan.
+// Select returns an n+2-tuple
+// (index int, recvOk bool, r_0 T_0, ... r_n-1 T_n-1)
+// The tuple's components, described below, must be accessed via the
+// Extract instruction.
+//
+// If Blocking, select waits until exactly one state holds, i.e. a
+// channel becomes ready for the designated operation of sending or
+// receiving; select chooses the state among the ready states
+// uniformly at random, performs the send or receive operation, and
+// sets 'index' to the index of the chosen channel.
+//
+// If !Blocking, select doesn't block if no states hold; instead it
+// returns immediately with index equal to -1.
+//
+// If the chosen channel was used for a receive, the r_i component is
+// set to the received value, where i is the index of that state among
+// all n receive states; otherwise r_i has the zero value of type T_i.
+// Note that the receive index i is not the same as the state index.
+//
+// The second component of the tuple, recvOk, is a boolean whose value
+// is true iff the selected operation was a receive and the receive
+// successfully yielded a value.
+//
+// Pos() returns the ast.SelectStmt.Select.
+//
+// Example printed form:
+// t3 = select nonblocking [<-t0, t1<-t2]
+// t4 = select blocking []
+//
+type Select struct {
+ register
+ States []*SelectState
+ Blocking bool
+}
+
+// The Range instruction yields an iterator, an opaque value of type
+// 'iter', over the domain of its operand, a string or map.
+//
+// Elements are accessed via Next.
+//
+// Pos() returns the ast.RangeStmt.For.
+//
+// Example printed form:
+// t0 = range "hello":string
+//
+type Range struct {
+ register
+ X Value // string or map
+}
+
+// The Next instruction reads and advances the (map or string)
+// iterator Iter and returns a 3-tuple value (ok, k, v). If the
+// iterator is not exhausted, ok is true and k and v are the next
+// elements of the domain and range, respectively. Otherwise ok is
+// false and k and v are undefined.
+//
+// Components of the tuple are accessed using Extract.
+//
+// The IsString field distinguishes iterators over strings from those
+// over maps, as the Type() alone is insufficient: consider
+// map[int]rune.
+//
+// Type() returns a *types.Tuple for the triple (ok, k, v).
+// The types of k and/or v may be types.Invalid.
+//
+// Example printed form:
+// t1 = next t0
+//
+type Next struct {
+ register
+ Iter Value
+ IsString bool // true => string iterator; false => map iterator.
+}
+
+// The TypeAssert instruction tests whether interface value X has type
+// AssertedType.
+//
+// If !CommaOk, on success it returns v, the result of the conversion
+// (defined below); on failure it panics.
+//
+// If CommaOk: on success it returns a pair (v, true) where v is the
+// result of the conversion; on failure it returns (z, false) where z
+// is AssertedType's zero value. The components of the pair must be
+// accessed using the Extract instruction.
+//
+// If AssertedType is a concrete type, TypeAssert checks whether the
+// dynamic type in interface X is equal to it, and if so, the result
+// of the conversion is a copy of the value in the interface.
+//
+// If AssertedType is an interface, TypeAssert checks whether the
+// dynamic type of the interface is assignable to it, and if so, the
+// result of the conversion is a copy of the interface value X.
+// If AssertedType is a superinterface of X.Type(), the operation will
+// fail iff the operand is nil. (Contrast with ChangeInterface, which
+// performs no nil-check.)
+//
+// Type() reflects the actual type of the result, possibly a
+// 2-types.Tuple; AssertedType is the asserted type.
+//
+// Pos() returns the ast.CallExpr.Lparen if the instruction arose from
+// an explicit T(e) conversion; the ast.TypeAssertExpr.Lparen if the
+// instruction arose from an explicit e.(T) operation; or the
+// ast.CaseClause.Case if the instruction arose from a case of a
+// type-switch statement.
+//
+// Example printed form:
+// t1 = typeassert t0.(int)
+// t3 = typeassert,ok t2.(T)
+//
+type TypeAssert struct {
+ register
+ X Value
+ AssertedType types.Type
+ CommaOk bool
+}
+
+// The Extract instruction yields component Index of Tuple.
+//
+// This is used to access the results of instructions with multiple
+// return values, such as Call, TypeAssert, Next, UnOp(ARROW) and
+// IndexExpr(Map).
+//
+// Example printed form:
+// t1 = extract t0 #1
+//
+type Extract struct {
+ register
+ Tuple Value
+ Index int
+}
+
+// Instructions executed for effect. They do not yield a value. --------------------
+
+// The Jump instruction transfers control to the sole successor of its
+// owning block.
+//
+// A Jump must be the last instruction of its containing BasicBlock.
+//
+// Pos() returns NoPos.
+//
+// Example printed form:
+// jump done
+//
+type Jump struct {
+ anInstruction
+}
+
+// The If instruction transfers control to one of the two successors
+// of its owning block, depending on the boolean Cond: the first if
+// true, the second if false.
+//
+// An If instruction must be the last instruction of its containing
+// BasicBlock.
+//
+// Pos() returns NoPos.
+//
+// Example printed form:
+// if t0 goto done else body
+//
+type If struct {
+ anInstruction
+ Cond Value
+}
+
+// The Return instruction returns values and control back to the calling
+// function.
+//
+// len(Results) is always equal to the number of results in the
+// function's signature.
+//
+// If len(Results) > 1, Return returns a tuple value with the specified
+// components which the caller must access using Extract instructions.
+//
+// There is no instruction to return a ready-made tuple like those
+// returned by a "value,ok"-mode TypeAssert, Lookup or UnOp(ARROW) or
+// a tail-call to a function with multiple result parameters.
+//
+// Return must be the last instruction of its containing BasicBlock.
+// Such a block has no successors.
+//
+// Pos() returns the ast.ReturnStmt.Return, if explicit in the source.
+//
+// Example printed form:
+// return
+// return nil:I, 2:int
+//
+type Return struct {
+ anInstruction
+ Results []Value
+ pos token.Pos
+}
+
+// The RunDefers instruction pops and invokes the entire stack of
+// procedure calls pushed by Defer instructions in this function.
+//
+// It is legal to encounter multiple 'rundefers' instructions in a
+// single control-flow path through a function; this is useful in
+// the combined init() function, for example.
+//
+// Pos() returns NoPos.
+//
+// Example printed form:
+// rundefers
+//
+type RunDefers struct {
+ anInstruction
+}
+
+// The Panic instruction initiates a panic with value X.
+//
+// A Panic instruction must be the last instruction of its containing
+// BasicBlock, which must have no successors.
+//
+// NB: 'go panic(x)' and 'defer panic(x)' do not use this instruction;
+// they are treated as calls to a built-in function.
+//
+// Pos() returns the ast.CallExpr.Lparen if this panic was explicit
+// in the source.
+//
+// Example printed form:
+// panic t0
+//
+type Panic struct {
+ anInstruction
+ X Value // an interface{}
+ pos token.Pos
+}
+
+// The Go instruction creates a new goroutine and calls the specified
+// function within it.
+//
+// See CallCommon for generic function call documentation.
+//
+// Pos() returns the ast.GoStmt.Go.
+//
+// Example printed form:
+// go println(t0, t1)
+// go t3()
+// go invoke t5.Println(...t6)
+//
+type Go struct {
+ anInstruction
+ Call CallCommon
+ pos token.Pos
+}
+
+// The Defer instruction pushes the specified call onto a stack of
+// functions to be called by a RunDefers instruction or by a panic.
+//
+// See CallCommon for generic function call documentation.
+//
+// Pos() returns the ast.DeferStmt.Defer.
+//
+// Example printed form:
+// defer println(t0, t1)
+// defer t3()
+// defer invoke t5.Println(...t6)
+//
+type Defer struct {
+ anInstruction
+ Call CallCommon
+ pos token.Pos
+}
+
+// The Send instruction sends X on channel Chan.
+//
+// Pos() returns the ast.SendStmt.Arrow, if explicit in the source.
+//
+// Example printed form:
+// send t0 <- t1
+//
+type Send struct {
+ anInstruction
+ Chan, X Value
+ pos token.Pos
+}
+
+// The Store instruction stores Val at address Addr.
+// Stores can be of arbitrary types.
+//
+// Pos() returns the ast.StarExpr.Star, if explicit in the source.
+//
+// Example printed form:
+// *x = y
+//
+type Store struct {
+ anInstruction
+ Addr Value
+ Val Value
+ pos token.Pos
+}
+
+// The MapUpdate instruction updates the association of Map[Key] to
+// Value.
+//
+// Pos() returns the ast.KeyValueExpr.Colon or ast.IndexExpr.Lbrack,
+// if explicit in the source.
+//
+// Example printed form:
+// t0[t1] = t2
+//
+type MapUpdate struct {
+ anInstruction
+ Map Value
+ Key Value
+ Value Value
+ pos token.Pos
+}
+
+// A DebugRef instruction maps a source-level expression Expr to the
+// SSA value X that represents the value (!IsAddr) or address (IsAddr)
+// of that expression.
+//
+// DebugRef is a pseudo-instruction: it has no dynamic effect.
+//
+// Pos() returns Expr.Pos(), the start position of the source-level
+// expression. This is not the same as the "designated" token as
+// documented at Value.Pos(). e.g. CallExpr.Pos() does not return the
+// position of the ("designated") Lparen token.
+//
+// If Expr is an *ast.Ident denoting a var or func, Object() returns
+// the object; though this information can be obtained from the type
+// checker, including it here greatly facilitates debugging.
+// For non-Ident expressions, Object() returns nil.
+//
+// DebugRefs are generated only for functions built with debugging
+// enabled; see Package.SetDebugMode() and the GlobalDebug builder
+// mode flag.
+//
+// DebugRefs are not emitted for ast.Idents referring to constants or
+// predeclared identifiers, since they are trivial and numerous.
+// Nor are they emitted for ast.ParenExprs.
+//
+// (By representing these as instructions, rather than out-of-band,
+// consistency is maintained during transformation passes by the
+// ordinary SSA renaming machinery.)
+//
+// Example printed form:
+// ; *ast.CallExpr @ 102:9 is t5
+// ; var x float64 @ 109:72 is x
+// ; address of *ast.CompositeLit @ 216:10 is t0
+//
+type DebugRef struct {
+ anInstruction
+ Expr ast.Expr // the referring expression (never *ast.ParenExpr)
+ object types.Object // the identity of the source var/func
+ IsAddr bool // Expr is addressable and X is the address it denotes
+ X Value // the value or address of Expr
+}
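+
+// debugBuildSketch is a hypothetical helper (not part of this API): a
+// minimal sketch of how a client might request DebugRef instructions
+// for one package before building it, as described above.
+func debugBuildSketch(pkg *Package) {
+ pkg.SetDebugMode(true) // emit DebugRefs for functions in this package
+ pkg.Build()
+}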
+
+// Embeddable mix-ins and helpers for common parts of other structs. -----------
+
+// register is a mix-in embedded by all SSA values that are also
+// instructions, i.e. virtual registers, and provides a uniform
+// implementation of most of the Value interface: Value.Name() is a
+// numbered register (e.g. "t0"); the other methods are field accessors.
+//
+// Temporary names are automatically assigned to each register on
+// completion of building a function in SSA form.
+//
+// Clients must not assume that the 'id' value (and the Name() derived
+// from it) is unique within a function. As always in this API,
+// semantics are determined only by identity; names exist only to
+// facilitate debugging.
+//
+type register struct {
+ anInstruction
+ num int // "name" of virtual register, e.g. "t0". Not guaranteed unique.
+ typ types.Type // type of virtual register
+ pos token.Pos // position of source expression, or NoPos
+ referrers []Instruction
+}
+
+// anInstruction is a mix-in embedded by all Instructions.
+// It provides the implementations of the Block and setBlock methods.
+type anInstruction struct {
+ block *BasicBlock // the basic block of this instruction
+}
+
+// CallCommon is contained by Go, Defer and Call to hold the
+// common parts of a function or method call.
+//
+// Each CallCommon exists in one of two modes, function call and
+// interface method invocation, or "call" and "invoke" for short.
+//
+// 1. "call" mode: when Method is nil (!IsInvoke), a CallCommon
+// represents an ordinary function call of the value in Value,
+// which may be a *Builtin, a *Function or any other value of kind
+// 'func'.
+//
+// Value may be one of:
+// (a) a *Function, indicating a statically dispatched call
+// to a package-level function, an anonymous function, or
+// a method of a named type.
+// (b) a *MakeClosure, indicating an immediately applied
+// function literal with free variables.
+// (c) a *Builtin, indicating a statically dispatched call
+// to a built-in function.
+// (d) any other value, indicating a dynamically dispatched
+// function call.
+// StaticCallee returns the identity of the callee in cases
+// (a) and (b), nil otherwise.
+//
+// Args contains the arguments to the call. If Value is a method,
+// Args[0] contains the receiver parameter.
+//
+// Example printed form:
+// t2 = println(t0, t1)
+// go t3()
+// defer t5(...t6)
+//
+// 2. "invoke" mode: when Method is non-nil (IsInvoke), a CallCommon
+// represents a dynamically dispatched call to an interface method.
+// In this mode, Value is the interface value and Method is the
+// interface's abstract method. Note: an abstract method may be
+// shared by multiple interfaces due to embedding; Value.Type()
+// provides the specific interface used for this call.
+//
+// Value is implicitly supplied to the concrete method implementation
+// as the receiver parameter; in other words, Args[0] holds not the
+// receiver but the first true argument.
+//
+// Example printed form:
+// t1 = invoke t0.String()
+// go invoke t3.Run(t2)
+// defer invoke t4.Handle(...t5)
+//
+// For all calls to variadic functions (Signature().Variadic()),
+// the last element of Args is a slice.
+//
+type CallCommon struct {
+ Value Value // receiver (invoke mode) or func value (call mode)
+ Method *types.Func // abstract method (invoke mode)
+ Args []Value // actual parameters (in static method call, includes receiver)
+ pos token.Pos // position of CallExpr.Lparen, iff explicit in source
+}
+
+// IsInvoke returns true if this call has "invoke" (not "call") mode.
+func (c *CallCommon) IsInvoke() bool {
+ return c.Method != nil
+}
+
+func (c *CallCommon) Pos() token.Pos { return c.pos }
+
+// Signature returns the signature of the called function.
+//
+// For an "invoke"-mode call, the signature of the interface method is
+// returned.
+//
+// In either "call" or "invoke" mode, if the callee is a method, its
+// receiver is represented by sig.Recv, not sig.Params().At(0).
+//
+func (c *CallCommon) Signature() *types.Signature {
+ if c.Method != nil {
+ return c.Method.Type().(*types.Signature)
+ }
+ return c.Value.Type().Underlying().(*types.Signature)
+}
+
+// StaticCallee returns the callee if this is a trivially static
+// "call"-mode call to a function.
+func (c *CallCommon) StaticCallee() *Function {
+ switch fn := c.Value.(type) {
+ case *Function:
+ return fn
+ case *MakeClosure:
+ return fn.Fn.(*Function)
+ }
+ return nil
+}
+
+// Description returns a description of the mode of this call suitable
+// for a user interface, e.g. "static method call".
+func (c *CallCommon) Description() string {
+ switch fn := c.Value.(type) {
+ case *Builtin:
+ return "built-in function call"
+ case *MakeClosure:
+ return "static function closure call"
+ case *Function:
+ if fn.Signature.Recv() != nil {
+ return "static method call"
+ }
+ return "static function call"
+ }
+ if c.IsInvoke() {
+ return "dynamic method call" // ("invoke" mode)
+ }
+ return "dynamic function call"
+}
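+
+// describeCallSketch is a hypothetical helper (not part of this API)
+// illustrating the usual client-side dispatch on the two CallCommon
+// modes documented above: check IsInvoke first, then fall back to
+// StaticCallee for "call" mode.
+func describeCallSketch(c *CallCommon) string {
+ if c.IsInvoke() {
+ // "invoke" mode: dynamically dispatched interface method call.
+ return "invoke " + c.Method.Name()
+ }
+ // "call" mode: the callee may still be statically known.
+ if callee := c.StaticCallee(); callee != nil {
+ return "static call to " + callee.Name()
+ }
+ return "dynamic call through " + c.Value.Name()
+}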
+
+// The CallInstruction interface, implemented by *Go, *Defer and *Call,
+// exposes the common parts of function-calling instructions,
+// yet provides a way back to the Value defined by *Call alone.
+//
+type CallInstruction interface {
+ Instruction
+ Common() *CallCommon // returns the common parts of the call
+ Value() *Call // returns the result value of the call (*Call) or nil (*Go, *Defer)
+}
+
+func (s *Call) Common() *CallCommon { return &s.Call }
+func (s *Defer) Common() *CallCommon { return &s.Call }
+func (s *Go) Common() *CallCommon { return &s.Call }
+
+func (s *Call) Value() *Call { return s }
+func (s *Defer) Value() *Call { return nil }
+func (s *Go) Value() *Call { return nil }
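+
+// callResultIsUnused is a hypothetical helper (not part of this API)
+// showing how Common and Value let a client treat *Call, *Go and
+// *Defer uniformly while still reaching the result value when one
+// exists.
+func callResultIsUnused(instr CallInstruction) bool {
+ v := instr.Value()
+ if v == nil {
+ return true // *Go and *Defer yield no value
+ }
+ refs := v.Referrers()
+ return refs == nil || len(*refs) == 0
+}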
+
+func (v *Builtin) Type() types.Type { return v.sig }
+func (v *Builtin) Name() string { return v.name }
+func (*Builtin) Referrers() *[]Instruction { return nil }
+func (v *Builtin) Pos() token.Pos { return token.NoPos }
+func (v *Builtin) Object() types.Object { return types.Universe.Lookup(v.name) }
+func (v *Builtin) Parent() *Function { return nil }
+
+func (v *FreeVar) Type() types.Type { return v.typ }
+func (v *FreeVar) Name() string { return v.name }
+func (v *FreeVar) Referrers() *[]Instruction { return &v.referrers }
+func (v *FreeVar) Pos() token.Pos { return v.pos }
+func (v *FreeVar) Parent() *Function { return v.parent }
+
+func (v *Global) Type() types.Type { return v.typ }
+func (v *Global) Name() string { return v.name }
+func (v *Global) Parent() *Function { return nil }
+func (v *Global) Pos() token.Pos { return v.pos }
+func (v *Global) Referrers() *[]Instruction { return nil }
+func (v *Global) Token() token.Token { return token.VAR }
+func (v *Global) Object() types.Object { return v.object }
+func (v *Global) String() string { return v.RelString(nil) }
+func (v *Global) Package() *Package { return v.Pkg }
+func (v *Global) RelString(from *types.Package) string { return relString(v, from) }
+
+func (v *Function) Name() string { return v.name }
+func (v *Function) Type() types.Type { return v.Signature }
+func (v *Function) Pos() token.Pos { return v.pos }
+func (v *Function) Token() token.Token { return token.FUNC }
+func (v *Function) Object() types.Object { return v.object }
+func (v *Function) String() string { return v.RelString(nil) }
+func (v *Function) Package() *Package { return v.Pkg }
+func (v *Function) Parent() *Function { return v.parent }
+func (v *Function) Referrers() *[]Instruction {
+ if v.parent != nil {
+ return &v.referrers
+ }
+ return nil
+}
+
+func (v *Parameter) Type() types.Type { return v.typ }
+func (v *Parameter) Name() string { return v.name }
+func (v *Parameter) Object() types.Object { return v.object }
+func (v *Parameter) Referrers() *[]Instruction { return &v.referrers }
+func (v *Parameter) Pos() token.Pos { return v.pos }
+func (v *Parameter) Parent() *Function { return v.parent }
+
+func (v *Alloc) Type() types.Type { return v.typ }
+func (v *Alloc) Referrers() *[]Instruction { return &v.referrers }
+func (v *Alloc) Pos() token.Pos { return v.pos }
+
+func (v *register) Type() types.Type { return v.typ }
+func (v *register) setType(typ types.Type) { v.typ = typ }
+func (v *register) Name() string { return fmt.Sprintf("t%d", v.num) }
+func (v *register) setNum(num int) { v.num = num }
+func (v *register) Referrers() *[]Instruction { return &v.referrers }
+func (v *register) Pos() token.Pos { return v.pos }
+func (v *register) setPos(pos token.Pos) { v.pos = pos }
+
+func (v *anInstruction) Parent() *Function { return v.block.parent }
+func (v *anInstruction) Block() *BasicBlock { return v.block }
+func (v *anInstruction) setBlock(block *BasicBlock) { v.block = block }
+func (v *anInstruction) Referrers() *[]Instruction { return nil }
+
+func (t *Type) Name() string { return t.object.Name() }
+func (t *Type) Pos() token.Pos { return t.object.Pos() }
+func (t *Type) Type() types.Type { return t.object.Type() }
+func (t *Type) Token() token.Token { return token.TYPE }
+func (t *Type) Object() types.Object { return t.object }
+func (t *Type) String() string { return t.RelString(nil) }
+func (t *Type) Package() *Package { return t.pkg }
+func (t *Type) RelString(from *types.Package) string { return relString(t, from) }
+
+func (c *NamedConst) Name() string { return c.object.Name() }
+func (c *NamedConst) Pos() token.Pos { return c.object.Pos() }
+func (c *NamedConst) String() string { return c.RelString(nil) }
+func (c *NamedConst) Type() types.Type { return c.object.Type() }
+func (c *NamedConst) Token() token.Token { return token.CONST }
+func (c *NamedConst) Object() types.Object { return c.object }
+func (c *NamedConst) Package() *Package { return c.pkg }
+func (c *NamedConst) RelString(from *types.Package) string { return relString(c, from) }
+
+// Func returns the package-level function of the specified name,
+// or nil if not found.
+//
+func (p *Package) Func(name string) (f *Function) {
+ f, _ = p.Members[name].(*Function)
+ return
+}
+
+// Var returns the package-level variable of the specified name,
+// or nil if not found.
+//
+func (p *Package) Var(name string) (g *Global) {
+ g, _ = p.Members[name].(*Global)
+ return
+}
+
+// Const returns the package-level constant of the specified name,
+// or nil if not found.
+//
+func (p *Package) Const(name string) (c *NamedConst) {
+ c, _ = p.Members[name].(*NamedConst)
+ return
+}
+
+// Type returns the package-level type of the specified name,
+// or nil if not found.
+//
+func (p *Package) Type(name string) (t *Type) {
+ t, _ = p.Members[name].(*Type)
+ return
+}
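+
+// packageFunctionsSketch is a hypothetical helper (not part of this
+// API): the typed accessors above are thin wrappers over the Members
+// map, so enumerating all members of one kind is a simple loop.
+func packageFunctionsSketch(p *Package) (fns []*Function) {
+ for _, mem := range p.Members {
+ if fn, ok := mem.(*Function); ok {
+ fns = append(fns, fn)
+ }
+ }
+ return
+}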
+
+func (v *Call) Pos() token.Pos { return v.Call.pos }
+func (s *Defer) Pos() token.Pos { return s.pos }
+func (s *Go) Pos() token.Pos { return s.pos }
+func (s *MapUpdate) Pos() token.Pos { return s.pos }
+func (s *Panic) Pos() token.Pos { return s.pos }
+func (s *Return) Pos() token.Pos { return s.pos }
+func (s *Send) Pos() token.Pos { return s.pos }
+func (s *Store) Pos() token.Pos { return s.pos }
+func (s *If) Pos() token.Pos { return token.NoPos }
+func (s *Jump) Pos() token.Pos { return token.NoPos }
+func (s *RunDefers) Pos() token.Pos { return token.NoPos }
+func (s *DebugRef) Pos() token.Pos { return s.Expr.Pos() }
+
+// Operands.
+
+func (v *Alloc) Operands(rands []*Value) []*Value {
+ return rands
+}
+
+func (v *BinOp) Operands(rands []*Value) []*Value {
+ return append(rands, &v.X, &v.Y)
+}
+
+func (c *CallCommon) Operands(rands []*Value) []*Value {
+ rands = append(rands, &c.Value)
+ for i := range c.Args {
+ rands = append(rands, &c.Args[i])
+ }
+ return rands
+}
+
+func (s *Go) Operands(rands []*Value) []*Value {
+ return s.Call.Operands(rands)
+}
+
+func (s *Call) Operands(rands []*Value) []*Value {
+ return s.Call.Operands(rands)
+}
+
+func (s *Defer) Operands(rands []*Value) []*Value {
+ return s.Call.Operands(rands)
+}
+
+func (v *ChangeInterface) Operands(rands []*Value) []*Value {
+ return append(rands, &v.X)
+}
+
+func (v *ChangeType) Operands(rands []*Value) []*Value {
+ return append(rands, &v.X)
+}
+
+func (v *Convert) Operands(rands []*Value) []*Value {
+ return append(rands, &v.X)
+}
+
+func (s *DebugRef) Operands(rands []*Value) []*Value {
+ return append(rands, &s.X)
+}
+
+func (v *Extract) Operands(rands []*Value) []*Value {
+ return append(rands, &v.Tuple)
+}
+
+func (v *Field) Operands(rands []*Value) []*Value {
+ return append(rands, &v.X)
+}
+
+func (v *FieldAddr) Operands(rands []*Value) []*Value {
+ return append(rands, &v.X)
+}
+
+func (s *If) Operands(rands []*Value) []*Value {
+ return append(rands, &s.Cond)
+}
+
+func (v *Index) Operands(rands []*Value) []*Value {
+ return append(rands, &v.X, &v.Index)
+}
+
+func (v *IndexAddr) Operands(rands []*Value) []*Value {
+ return append(rands, &v.X, &v.Index)
+}
+
+func (*Jump) Operands(rands []*Value) []*Value {
+ return rands
+}
+
+func (v *Lookup) Operands(rands []*Value) []*Value {
+ return append(rands, &v.X, &v.Index)
+}
+
+func (v *MakeChan) Operands(rands []*Value) []*Value {
+ return append(rands, &v.Size)
+}
+
+func (v *MakeClosure) Operands(rands []*Value) []*Value {
+ rands = append(rands, &v.Fn)
+ for i := range v.Bindings {
+ rands = append(rands, &v.Bindings[i])
+ }
+ return rands
+}
+
+func (v *MakeInterface) Operands(rands []*Value) []*Value {
+ return append(rands, &v.X)
+}
+
+func (v *MakeMap) Operands(rands []*Value) []*Value {
+ return append(rands, &v.Reserve)
+}
+
+func (v *MakeSlice) Operands(rands []*Value) []*Value {
+ return append(rands, &v.Len, &v.Cap)
+}
+
+func (v *MapUpdate) Operands(rands []*Value) []*Value {
+ return append(rands, &v.Map, &v.Key, &v.Value)
+}
+
+func (v *Next) Operands(rands []*Value) []*Value {
+ return append(rands, &v.Iter)
+}
+
+func (s *Panic) Operands(rands []*Value) []*Value {
+ return append(rands, &s.X)
+}
+
+func (v *Phi) Operands(rands []*Value) []*Value {
+ for i := range v.Edges {
+ rands = append(rands, &v.Edges[i])
+ }
+ return rands
+}
+
+func (v *Range) Operands(rands []*Value) []*Value {
+ return append(rands, &v.X)
+}
+
+func (s *Return) Operands(rands []*Value) []*Value {
+ for i := range s.Results {
+ rands = append(rands, &s.Results[i])
+ }
+ return rands
+}
+
+func (*RunDefers) Operands(rands []*Value) []*Value {
+ return rands
+}
+
+func (v *Select) Operands(rands []*Value) []*Value {
+ for i := range v.States {
+ rands = append(rands, &v.States[i].Chan, &v.States[i].Send)
+ }
+ return rands
+}
+
+func (s *Send) Operands(rands []*Value) []*Value {
+ return append(rands, &s.Chan, &s.X)
+}
+
+func (v *Slice) Operands(rands []*Value) []*Value {
+ return append(rands, &v.X, &v.Low, &v.High, &v.Max)
+}
+
+func (s *Store) Operands(rands []*Value) []*Value {
+ return append(rands, &s.Addr, &s.Val)
+}
+
+func (v *TypeAssert) Operands(rands []*Value) []*Value {
+ return append(rands, &v.X)
+}
+
+func (v *UnOp) Operands(rands []*Value) []*Value {
+ return append(rands, &v.X)
+}
+
+// Non-Instruction Values:
+func (v *Builtin) Operands(rands []*Value) []*Value { return rands }
+func (v *FreeVar) Operands(rands []*Value) []*Value { return rands }
+func (v *Const) Operands(rands []*Value) []*Value { return rands }
+func (v *Function) Operands(rands []*Value) []*Value { return rands }
+func (v *Global) Operands(rands []*Value) []*Value { return rands }
+func (v *Parameter) Operands(rands []*Value) []*Value { return rands }
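+
+// operandValuesSketch is a hypothetical helper (not part of this API)
+// showing the intended use of Operands: pass a reusable buffer,
+// dereference the returned pointers, and skip nil operands (e.g. the
+// optional bounds of a Slice).
+func operandValuesSketch(instr Instruction) []Value {
+ var buf [8]*Value // avoids allocation in the common case
+ var vals []Value
+ for _, op := range instr.Operands(buf[:0]) {
+ if *op != nil {
+ vals = append(vals, *op)
+ }
+ }
+ return vals
+}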
diff --git a/llgo/third_party/go.tools/go/ssa/ssautil/switch.go b/llgo/third_party/go.tools/go/ssa/ssautil/switch.go
new file mode 100644
index 0000000000000000000000000000000000000000..5e1065db7a9ee05b45209b93acc1e51bbfa3348a
--- /dev/null
+++ b/llgo/third_party/go.tools/go/ssa/ssautil/switch.go
@@ -0,0 +1,234 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssautil
+
+// This file implements discovery of switch and type-switch constructs
+// from low-level control flow.
+//
+// Many techniques exist for compiling a high-level switch with
+// constant cases to efficient machine code. The optimal choice will
+// depend on the data type, the specific case values, the code in the
+// body of each case, and the hardware.
+// Some examples:
+// - a lookup table (for a switch that maps constants to constants)
+// - a computed goto
+// - a binary tree
+// - a perfect hash
+// - a two-level switch (to partition constant strings by their first byte).
+
+import (
+ "bytes"
+ "fmt"
+ "go/token"
+
+ "llvm.org/llgo/third_party/go.tools/go/ssa"
+ "llvm.org/llgo/third_party/go.tools/go/types"
+)
+
+// A ConstCase represents a single constant comparison.
+// It is part of a Switch.
+type ConstCase struct {
+ Block *ssa.BasicBlock // block performing the comparison
+ Body *ssa.BasicBlock // body of the case
+ Value *ssa.Const // case comparand
+}
+
+// A TypeCase represents a single type assertion.
+// It is part of a Switch.
+type TypeCase struct {
+ Block *ssa.BasicBlock // block performing the type assert
+ Body *ssa.BasicBlock // body of the case
+ Type types.Type // case type
+ Binding ssa.Value // value bound by this case
+}
+
+// A Switch is a logical high-level control flow operation
+// (a multiway branch) discovered by analysis of a CFG containing
+// only if/else chains. It is not part of the ssa.Instruction set.
+//
+// One of ConstCases and TypeCases has length >= 2;
+// the other is nil.
+//
+// In a value switch, the list of cases may contain duplicate constants.
+// A type switch may contain duplicate types, or types assignable
+// to an interface type also in the list.
+// TODO(adonovan): eliminate such duplicates.
+//
+type Switch struct {
+ Start *ssa.BasicBlock // block containing start of if/else chain
+ X ssa.Value // the switch operand
+ ConstCases []ConstCase // ordered list of constant comparisons
+ TypeCases []TypeCase // ordered list of type assertions
+ Default *ssa.BasicBlock // successor if all comparisons fail
+}
+
+func (sw *Switch) String() string {
+ // We represent each block by the String() of its
+ // first Instruction, e.g. "print(42:int)".
+ var buf bytes.Buffer
+ if sw.ConstCases != nil {
+ fmt.Fprintf(&buf, "switch %s {\n", sw.X.Name())
+ for _, c := range sw.ConstCases {
+ fmt.Fprintf(&buf, "case %s: %s\n", c.Value, c.Body.Instrs[0])
+ }
+ } else {
+ fmt.Fprintf(&buf, "switch %s.(type) {\n", sw.X.Name())
+ for _, c := range sw.TypeCases {
+ fmt.Fprintf(&buf, "case %s %s: %s\n",
+ c.Binding.Name(), c.Type, c.Body.Instrs[0])
+ }
+ }
+ if sw.Default != nil {
+ fmt.Fprintf(&buf, "default: %s\n", sw.Default.Instrs[0])
+ }
+ fmt.Fprintf(&buf, "}")
+ return buf.String()
+}
+
+// Switches examines the control-flow graph of fn and returns the
+// set of inferred value and type switches. A value switch tests an
+// ssa.Value for equality against two or more compile-time constant
+// values. Switches involving link-time constants (addresses) are
+// ignored. A type switch type-asserts an ssa.Value against two or
+// more types.
+//
+// The switches are returned in dominance order.
+//
+// The resulting switches do not necessarily correspond to uses of the
+// 'switch' keyword in the source: for example, a single source-level
+// switch statement with non-constant cases may result in zero, one or
+// many Switches, one per plural sequence of constant cases.
+// Switches may even be inferred from if/else- or goto-based control flow.
+// (In general, the control flow constructs of the source program
+// cannot be faithfully reproduced from the SSA representation.)
+//
+func Switches(fn *ssa.Function) []Switch {
+ // Traverse the CFG in dominance order, so we don't
+ // enter an if/else-chain in the middle.
+ var switches []Switch
+ seen := make(map[*ssa.BasicBlock]bool) // TODO(adonovan): opt: use ssa.blockSet
+ for _, b := range fn.DomPreorder() {
+ if x, k := isComparisonBlock(b); x != nil {
+ // Block b starts a switch.
+ sw := Switch{Start: b, X: x}
+ valueSwitch(&sw, k, seen)
+ if len(sw.ConstCases) > 1 {
+ switches = append(switches, sw)
+ }
+ }
+
+ if y, x, T := isTypeAssertBlock(b); y != nil {
+ // Block b starts a type switch.
+ sw := Switch{Start: b, X: x}
+ typeSwitch(&sw, y, T, seen)
+ if len(sw.TypeCases) > 1 {
+ switches = append(switches, sw)
+ }
+ }
+ }
+ return switches
+}
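+
+// printSwitchesSketch is a hypothetical helper (not part of this API)
+// showing typical use of Switches on a single built function: each
+// reconstructed Switch prints in the source-like form produced by
+// String above.
+func printSwitchesSketch(fn *ssa.Function) {
+ for _, sw := range Switches(fn) {
+ fmt.Printf("%s:\n%s\n", fn, &sw)
+ }
+}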
+
+func valueSwitch(sw *Switch, k *ssa.Const, seen map[*ssa.BasicBlock]bool) {
+ b := sw.Start
+ x := sw.X
+ for x == sw.X {
+ if seen[b] {
+ break
+ }
+ seen[b] = true
+
+ sw.ConstCases = append(sw.ConstCases, ConstCase{
+ Block: b,
+ Body: b.Succs[0],
+ Value: k,
+ })
+ b = b.Succs[1]
+ if len(b.Instrs) > 2 {
+ // Block b contains not just 'if x == k',
+ // so it may have side effects that
+ // make it unsafe to elide.
+ break
+ }
+ if len(b.Preds) != 1 {
+ // Block b has multiple predecessors,
+ // so it cannot be treated as a case.
+ break
+ }
+ x, k = isComparisonBlock(b)
+ }
+ sw.Default = b
+}
+
+func typeSwitch(sw *Switch, y ssa.Value, T types.Type, seen map[*ssa.BasicBlock]bool) {
+ b := sw.Start
+ x := sw.X
+ for x == sw.X {
+ if seen[b] {
+ break
+ }
+ seen[b] = true
+
+ sw.TypeCases = append(sw.TypeCases, TypeCase{
+ Block: b,
+ Body: b.Succs[0],
+ Type: T,
+ Binding: y,
+ })
+ b = b.Succs[1]
+ if len(b.Instrs) > 4 {
+ // Block b contains not just
+ // {TypeAssert; Extract #0; Extract #1; If}
+ // so it may have side effects that
+ // make it unsafe to elide.
+ break
+ }
+ if len(b.Preds) != 1 {
+ // Block b has multiple predecessors,
+ // so it cannot be treated as a case.
+ break
+ }
+ y, x, T = isTypeAssertBlock(b)
+ }
+ sw.Default = b
+}
+
+// isComparisonBlock returns the operands (v, k) if a block ends with
+// a comparison v==k, where k is a compile-time constant.
+//
+func isComparisonBlock(b *ssa.BasicBlock) (v ssa.Value, k *ssa.Const) {
+ if n := len(b.Instrs); n >= 2 {
+ if i, ok := b.Instrs[n-1].(*ssa.If); ok {
+ if binop, ok := i.Cond.(*ssa.BinOp); ok && binop.Block() == b && binop.Op == token.EQL {
+ if k, ok := binop.Y.(*ssa.Const); ok {
+ return binop.X, k
+ }
+ if k, ok := binop.X.(*ssa.Const); ok {
+ return binop.Y, k
+ }
+ }
+ }
+ }
+ return
+}
+
+// isTypeAssertBlock returns the operands (y, x, T) if a block ends with
+// a type assertion "if y, ok := x.(T); ok {".
+//
+func isTypeAssertBlock(b *ssa.BasicBlock) (y, x ssa.Value, T types.Type) {
+ if n := len(b.Instrs); n >= 4 {
+ if i, ok := b.Instrs[n-1].(*ssa.If); ok {
+ if ext1, ok := i.Cond.(*ssa.Extract); ok && ext1.Block() == b && ext1.Index == 1 {
+ if ta, ok := ext1.Tuple.(*ssa.TypeAssert); ok && ta.Block() == b {
+ // hack: relies upon instruction ordering.
+ if ext0, ok := b.Instrs[n-3].(*ssa.Extract); ok {
+ return ext0, ta.X, ta.AssertedType
+ }
+ }
+ }
+ }
+ }
+ return
+}
diff --git a/llgo/third_party/go.tools/go/ssa/ssautil/switch_test.go b/llgo/third_party/go.tools/go/ssa/ssautil/switch_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..ceb09e6af8b035cb949a9e20e36b1272fff8d497
--- /dev/null
+++ b/llgo/third_party/go.tools/go/ssa/ssautil/switch_test.go
@@ -0,0 +1,70 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssautil_test
+
+import (
+ "go/parser"
+ "strings"
+ "testing"
+
+ "llvm.org/llgo/third_party/go.tools/go/loader"
+ "llvm.org/llgo/third_party/go.tools/go/ssa"
+ "llvm.org/llgo/third_party/go.tools/go/ssa/ssautil"
+)
+
+func TestSwitches(t *testing.T) {
+ conf := loader.Config{ParserMode: parser.ParseComments}
+ f, err := conf.ParseFile("testdata/switches.go", nil)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ conf.CreateFromFiles("main", f)
+ iprog, err := conf.Load()
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ prog := ssa.Create(iprog, 0)
+ mainPkg := prog.Package(iprog.Created[0].Pkg)
+ mainPkg.Build()
+
+ for _, mem := range mainPkg.Members {
+ if fn, ok := mem.(*ssa.Function); ok {
+ if fn.Synthetic != "" {
+ continue // e.g. init()
+ }
+ // Each (multi-line) "switch" comment within
+ // this function must match the printed form
+ // of a ConstSwitch.
+ var wantSwitches []string
+ for _, c := range f.Comments {
+ if fn.Syntax().Pos() <= c.Pos() && c.Pos() < fn.Syntax().End() {
+ text := strings.TrimSpace(c.Text())
+ if strings.HasPrefix(text, "switch ") {
+ wantSwitches = append(wantSwitches, text)
+ }
+ }
+ }
+
+ switches := ssautil.Switches(fn)
+ if len(switches) != len(wantSwitches) {
+ t.Errorf("in %s, found %d switches, want %d", fn, len(switches), len(wantSwitches))
+ }
+ for i, sw := range switches {
+ got := sw.String()
+ if i >= len(wantSwitches) {
+ continue
+ }
+ want := wantSwitches[i]
+ if got != want {
+ t.Errorf("in %s, found switch %d: got <<%s>>, want <<%s>>", fn, i, got, want)
+ }
+ }
+ }
+ }
+}
diff --git a/llgo/third_party/go.tools/go/ssa/ssautil/testdata/switches.go b/llgo/third_party/go.tools/go/ssa/ssautil/testdata/switches.go
new file mode 100644
index 0000000000000000000000000000000000000000..8ab4c118f161680fd3e568722c2037d88f48dfe6
--- /dev/null
+++ b/llgo/third_party/go.tools/go/ssa/ssautil/testdata/switches.go
@@ -0,0 +1,357 @@
+// +build ignore
+
+package main
+
+// This file is the input to TestSwitches in switch_test.go.
+// Each multiway conditional with constant or type cases (Switch)
+// discovered by Switches is printed, and compared with the
+// comments.
+//
+// The body of each case is printed as the value of its first
+// instruction.
+
+// -------- Value switches --------
+
+func SimpleSwitch(x, y int) {
+ // switch x {
+ // case 1:int: print(1:int)
+ // case 2:int: print(23:int)
+ // case 3:int: print(23:int)
+ // case 4:int: print(3:int)
+ // default: x == y
+ // }
+ switch x {
+ case 1:
+ print(1)
+ case 2, 3:
+ print(23)
+ fallthrough
+ case 4:
+ print(3)
+ default:
+ print(4)
+ case y:
+ print(5)
+ }
+ print(6)
+}
+
+func four() int { return 4 }
+
+// A non-constant case makes a switch "impure", but its pure
+// cases form two separate switches.
+func SwitchWithNonConstantCase(x int) {
+ // switch x {
+ // case 1:int: print(1:int)
+ // case 2:int: print(23:int)
+ // case 3:int: print(23:int)
+ // default: four()
+ // }
+
+ // switch x {
+ // case 5:int: print(5:int)
+ // case 6:int: print(6:int)
+ // default: print("done":string)
+ // }
+ switch x {
+ case 1:
+ print(1)
+ case 2, 3:
+ print(23)
+ case four():
+ print(3)
+ case 5:
+ print(5)
+ case 6:
+ print(6)
+ }
+ print("done")
+}
+
+// Switches may be found even where the source
+// program doesn't have a switch statement.
+
+func ImplicitSwitches(x, y int) {
+ // switch x {
+ // case 1:int: print(12:int)
+ // case 2:int: print(12:int)
+ // default: x < 5:int
+ // }
+ if x == 1 || 2 == x || x < 5 {
+ print(12)
+ }
+
+ // switch x {
+ // case 3:int: print(34:int)
+ // case 4:int: print(34:int)
+ // default: x == y
+ // }
+ if x == 3 || 4 == x || x == y {
+ print(34)
+ }
+
+ // Not a switch: no consistent variable.
+ if x == 5 || y == 6 {
+ print(56)
+ }
+
+ // Not a switch: only one constant comparison.
+ if x == 7 || x == y {
+ print(78)
+ }
+}
+
+func IfElseBasedSwitch(x int) {
+ // switch x {
+ // case 1:int: print(1:int)
+ // case 2:int: print(2:int)
+ // default: print("else":string)
+ // }
+ if x == 1 {
+ print(1)
+ } else if x == 2 {
+ print(2)
+ } else {
+ print("else")
+ }
+}
+
+func GotoBasedSwitch(x int) {
+ // switch x {
+ // case 1:int: print(1:int)
+ // case 2:int: print(2:int)
+ // default: print("else":string)
+ // }
+ if x == 1 {
+ goto L1
+ }
+ if x == 2 {
+ goto L2
+ }
+ print("else")
+L1:
+ print(1)
+ goto end
+L2:
+ print(2)
+end:
+}
+
+func SwitchInAForLoop(x int) {
+ // switch x {
+ // case 1:int: print(1:int)
+ // case 2:int: print(2:int)
+ // default: print("head":string)
+ // }
+loop:
+ for {
+ print("head")
+ switch x {
+ case 1:
+ print(1)
+ break loop
+ case 2:
+ print(2)
+ break loop
+ }
+ }
+}
+
+// This case is a switch in a for-loop, both constructed using goto.
+// As before, the default case points back to the block containing the
+// switch, but that's ok.
+func SwitchInAForLoopUsingGoto(x int) {
+ // switch x {
+ // case 1:int: print(1:int)
+ // case 2:int: print(2:int)
+ // default: print("head":string)
+ // }
+loop:
+ print("head")
+ if x == 1 {
+ goto L1
+ }
+ if x == 2 {
+ goto L2
+ }
+ goto loop
+L1:
+ print(1)
+ goto end
+L2:
+ print(2)
+end:
+}
+
+func UnstructuredSwitchInAForLoop(x int) {
+ // switch x {
+ // case 1:int: print(1:int)
+ // case 2:int: x == 1:int
+ // default: print("end":string)
+ // }
+ for {
+ if x == 1 {
+ print(1)
+ return
+ }
+ if x == 2 {
+ continue
+ }
+ break
+ }
+ print("end")
+}
+
+func CaseWithMultiplePreds(x int) {
+ for {
+ if x == 1 {
+ print(1)
+ return
+ }
+ loop:
+ // This block has multiple predecessors,
+ // so can't be treated as a switch case.
+ if x == 2 {
+ goto loop
+ }
+ break
+ }
+ print("end")
+}
+
+func DuplicateConstantsAreNotEliminated(x int) {
+ // switch x {
+ // case 1:int: print(1:int)
+ // case 1:int: print("1a":string)
+ // case 2:int: print(2:int)
+ // default: return
+ // }
+ if x == 1 {
+ print(1)
+ } else if x == 1 { // duplicate => unreachable
+ print("1a")
+ } else if x == 2 {
+ print(2)
+ }
+}
+
+// Interface values (created by comparisons) are not constants,
+// so ConstSwitch.X is never of interface type.
+func MakeInterfaceIsNotAConstant(x interface{}) {
+ if x == "foo" {
+ print("foo")
+ } else if x == 1 {
+ print(1)
+ }
+}
+
+func ZeroInitializedVarsAreConstants(x int) {
+ // switch x {
+ // case 0:int: print(1:int)
+ // case 2:int: print(2:int)
+ // default: print("end":string)
+ // }
+ var zero int // SSA construction replaces zero with 0
+ if x == zero {
+ print(1)
+ } else if x == 2 {
+ print(2)
+ }
+ print("end")
+}
+
+// -------- Select --------
+
+// NB, potentially fragile reliance on register number.
+func SelectDesugarsToSwitch(ch chan int) {
+ // switch t1 {
+ // case 0:int: extract t0 #2
+ // case 1:int: println(0:int)
+ // case 2:int: println(1:int)
+ // default: println("default":string)
+ // }
+ select {
+ case x := <-ch:
+ println(x)
+ case <-ch:
+ println(0)
+ case ch <- 1:
+ println(1)
+ default:
+ println("default")
+ }
+}
+
+// NB, potentially fragile reliance on register number.
+func NonblockingSelectDefaultCasePanics(ch chan int) {
+ // switch t1 {
+ // case 0:int: extract t0 #2
+ // case 1:int: println(0:int)
+ // case 2:int: println(1:int)
+ // default: make interface{} <- string ("blocking select m...":string)
+ // }
+ select {
+ case x := <-ch:
+ println(x)
+ case <-ch:
+ println(0)
+ case ch <- 1:
+ println(1)
+ }
+}
+
+// -------- Type switches --------
+
+// NB, reliance on fragile register numbering.
+func SimpleTypeSwitch(x interface{}) {
+ // switch x.(type) {
+ // case t3 int: println(x)
+ // case t7 bool: println(x)
+ // case t10 string: println(t10)
+ // default: println(x)
+ // }
+ switch y := x.(type) {
+ case nil:
+ println(y)
+ case int, bool:
+ println(y)
+ case string:
+ println(y)
+ default:
+ println(y)
+ }
+}
+
+// NB, potentially fragile reliance on register number.
+func DuplicateTypesAreNotEliminated(x interface{}) {
+ // switch x.(type) {
+ // case t1 string: println(1:int)
+ // case t5 interface{}: println(t5)
+ // case t9 int: println(3:int)
+ // default: return
+ // }
+ switch y := x.(type) {
+ case string:
+ println(1)
+ case interface{}:
+ println(y)
+ case int:
+ println(3) // unreachable!
+ }
+}
+
+// NB, potentially fragile reliance on register number.
+func AdHocTypeSwitch(x interface{}) {
+ // switch x.(type) {
+ // case t1 int: println(t1)
+ // case t5 string: println(t5)
+ // default: print("default":string)
+ // }
+ if i, ok := x.(int); ok {
+ println(i)
+ } else if s, ok := x.(string); ok {
+ println(s)
+ } else {
+ print("default")
+ }
+}
diff --git a/llgo/third_party/go.tools/go/ssa/ssautil/visit.go b/llgo/third_party/go.tools/go/ssa/ssautil/visit.go
new file mode 100644
index 0000000000000000000000000000000000000000..01ad06c18431d456c02bb19e1cf8cfe454906cd5
--- /dev/null
+++ b/llgo/third_party/go.tools/go/ssa/ssautil/visit.go
@@ -0,0 +1,66 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssautil
+
+import "llvm.org/llgo/third_party/go.tools/go/ssa"
+
+// This file defines utilities for visiting the SSA representation of
+// a Program.
+//
+// TODO(adonovan): test coverage.
+
+// AllFunctions finds and returns the set of functions potentially
+// needed by program prog, as determined by a simple linker-style
+// reachability algorithm starting from the members and method-sets of
+// each package. The result may include anonymous functions and
+// synthetic wrappers.
+//
+// Precondition: all packages are built.
+//
+func AllFunctions(prog *ssa.Program) map[*ssa.Function]bool {
+ visit := visitor{
+ prog: prog,
+ seen: make(map[*ssa.Function]bool),
+ }
+ visit.program()
+ return visit.seen
+}
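+
+// totalInstructionsSketch is a hypothetical helper (not part of this
+// API): once AllFunctions has computed the reachable set, whole-program
+// statistics such as an instruction count follow from a walk over each
+// function's blocks.
+func totalInstructionsSketch(prog *ssa.Program) int {
+ n := 0
+ for fn := range AllFunctions(prog) {
+ for _, b := range fn.Blocks {
+ n += len(b.Instrs)
+ }
+ }
+ return n
+}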
+
+type visitor struct {
+ prog *ssa.Program
+ seen map[*ssa.Function]bool
+}
+
+func (visit *visitor) program() {
+ for _, pkg := range visit.prog.AllPackages() {
+ for _, mem := range pkg.Members {
+ if fn, ok := mem.(*ssa.Function); ok {
+ visit.function(fn)
+ }
+ }
+ }
+ for _, T := range visit.prog.TypesWithMethodSets() {
+ mset := visit.prog.MethodSets.MethodSet(T)
+ for i, n := 0, mset.Len(); i < n; i++ {
+ visit.function(visit.prog.Method(mset.At(i)))
+ }
+ }
+}
+
+func (visit *visitor) function(fn *ssa.Function) {
+ if !visit.seen[fn] {
+ visit.seen[fn] = true
+ var buf [10]*ssa.Value // avoid alloc in common case
+ for _, b := range fn.Blocks {
+ for _, instr := range b.Instrs {
+ for _, op := range instr.Operands(buf[:0]) {
+ if fn, ok := (*op).(*ssa.Function); ok {
+ visit.function(fn)
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/llgo/third_party/go.tools/go/ssa/stdlib_test.go b/llgo/third_party/go.tools/go/ssa/stdlib_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..4bcf73b30e67af6ae9736aef8ec8dea9300861bb
--- /dev/null
+++ b/llgo/third_party/go.tools/go/ssa/stdlib_test.go
@@ -0,0 +1,130 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa_test
+
+// This file runs the SSA builder in sanity-checking mode on all
+// packages beneath $GOROOT and prints some summary information.
+//
+// Run with "go test -cpu=8 to" set GOMAXPROCS.
+
+import (
+ "go/build"
+ "go/token"
+ "runtime"
+ "testing"
+ "time"
+
+ "llvm.org/llgo/third_party/go.tools/go/buildutil"
+ "llvm.org/llgo/third_party/go.tools/go/loader"
+ "llvm.org/llgo/third_party/go.tools/go/ssa"
+ "llvm.org/llgo/third_party/go.tools/go/ssa/ssautil"
+)
+
+func bytesAllocated() uint64 {
+ runtime.GC()
+ var stats runtime.MemStats
+ runtime.ReadMemStats(&stats)
+ return stats.Alloc
+}
+
+func TestStdlib(t *testing.T) {
+ // Load, parse and type-check the program.
+ t0 := time.Now()
+ alloc0 := bytesAllocated()
+
+ // Load, parse and type-check the program.
+ ctxt := build.Default // copy
+ ctxt.GOPATH = "" // disable GOPATH
+ conf := loader.Config{
+ SourceImports: true,
+ Build: &ctxt,
+ }
+ if _, err := conf.FromArgs(buildutil.AllPackages(conf.Build), true); err != nil {
+ t.Errorf("FromArgs failed: %v", err)
+ return
+ }
+
+ iprog, err := conf.Load()
+ if err != nil {
+ t.Fatalf("Load failed: %v", err)
+ }
+
+ t1 := time.Now()
+ alloc1 := bytesAllocated()
+
+ // Create SSA packages.
+ var mode ssa.BuilderMode
+ // Comment out these lines during benchmarking. Approx SSA build costs are noted.
+ mode |= ssa.SanityCheckFunctions // + 2% space, + 4% time
+ mode |= ssa.GlobalDebug // +30% space, +18% time
+ prog := ssa.Create(iprog, mode)
+
+ t2 := time.Now()
+
+ // Build SSA.
+ prog.BuildAll()
+
+ t3 := time.Now()
+ alloc3 := bytesAllocated()
+
+ numPkgs := len(prog.AllPackages())
+ if want := 140; numPkgs < want {
+ t.Errorf("Loaded only %d packages, want at least %d", numPkgs, want)
+ }
+
+ // Keep iprog reachable until after we've measured memory usage.
+ if len(iprog.AllPackages) == 0 {
+ print() // unreachable
+ }
+
+ allFuncs := ssautil.AllFunctions(prog)
+
+ // Check that all non-synthetic functions have distinct names.
+ byName := make(map[string]*ssa.Function)
+ for fn := range allFuncs {
+ if fn.Synthetic == "" {
+ str := fn.String()
+ prev := byName[str]
+ byName[str] = fn
+ if prev != nil {
+ t.Errorf("%s: duplicate function named %s",
+ prog.Fset.Position(fn.Pos()), str)
+ t.Errorf("%s: (previously defined here)",
+ prog.Fset.Position(prev.Pos()))
+ }
+ }
+ }
+
+ // Dump some statistics.
+ var numInstrs int
+ for fn := range allFuncs {
+ for _, b := range fn.Blocks {
+ numInstrs += len(b.Instrs)
+ }
+ }
+
+ // determine line count
+ var lineCount int
+ prog.Fset.Iterate(func(f *token.File) bool {
+ lineCount += f.LineCount()
+ return true
+ })
+
+ // NB: when benchmarking, don't forget to clear the debug +
+ // sanity builder flags for better performance.
+
+ t.Log("GOMAXPROCS: ", runtime.GOMAXPROCS(0))
+ t.Log("#Source lines: ", lineCount)
+ t.Log("Load/parse/typecheck: ", t1.Sub(t0))
+ t.Log("SSA create: ", t2.Sub(t1))
+ t.Log("SSA build: ", t3.Sub(t2))
+
+ // SSA stats:
+ t.Log("#Packages: ", numPkgs)
+ t.Log("#Functions: ", len(allFuncs))
+ t.Log("#Instructions: ", numInstrs)
+ t.Log("#MB AST+types: ", int64(alloc1-alloc0)/1e6)
+ t.Log("#MB SSA: ", int64(alloc3-alloc1)/1e6)
+}
diff --git a/llgo/third_party/go.tools/go/ssa/testdata/objlookup.go b/llgo/third_party/go.tools/go/ssa/testdata/objlookup.go
new file mode 100644
index 0000000000000000000000000000000000000000..bd266e4550ee74b1893b66d482f0d726f314df18
--- /dev/null
+++ b/llgo/third_party/go.tools/go/ssa/testdata/objlookup.go
@@ -0,0 +1,160 @@
+//+build ignore
+
+package main
+
+// This file is the input to TestObjValueLookup in source_test.go,
+// which ensures that each occurrence of an ident defining or
+// referring to a func, var or const object can be mapped to its
+// corresponding SSA Value.
+//
+// For every reference to a var object, we use annotations in comments
+// to denote both the expected SSA Value kind, and whether to expect
+// its value (x) or its address (&x).
+//
+// For const and func objects, the results don't vary by reference and
+// are always values not addresses, so no annotations are needed. The
+// declaration is enough.
+
+import "fmt"
+import "os"
+
+type J int
+
+func (*J) method() {}
+
+const globalConst = 0
+
+var globalVar int // &globalVar::Global
+
+func globalFunc() {}
+
+type I interface {
+ interfaceMethod()
+}
+
+type S struct {
+ x int // x::nil
+}
+
+func main() {
+ print(globalVar) // globalVar::UnOp
+ globalVar = 1 // globalVar::Const
+
+ var v0 int = 1 // v0::Const (simple local value spec)
+ if v0 > 0 { // v0::Const
+ v0 = 2 // v0::Const
+ }
+ print(v0) // v0::Phi
+
+ // v1 is captured and thus implicitly address-taken.
+ var v1 int = 1 // v1::Const
+ v1 = 2 // v1::Const
+ fmt.Println(v1) // v1::UnOp (load)
+ f := func(param int) { // f::MakeClosure param::Parameter
+ if y := 1; y > 0 { // y::Const
+ print(v1, param) // v1::UnOp (load) param::Parameter
+ }
+ param = 2 // param::Const
+ println(param) // param::Const
+ }
+
+ f(0) // f::MakeClosure
+
+ var v2 int // v2::Const (implicitly zero-initialized local value spec)
+ print(v2) // v2::Const
+
+ m := make(map[string]int) // m::MakeMap
+
+ // Local value spec with multi-valued RHS:
+ var v3, v4 = m[""] // v3::Extract v4::Extract m::MakeMap
+ print(v3) // v3::Extract
+ print(v4) // v4::Extract
+
+ v3++ // v3::BinOp (assign with op)
+ v3 += 2 // v3::BinOp (assign with op)
+
+ v5, v6 := false, "" // v5::Const v6::Const (defining assignment)
+ print(v5) // v5::Const
+ print(v6) // v6::Const
+
+ var v7 S // &v7::Alloc
+ v7.x = 1 // &v7::Alloc &x::FieldAddr
+ print(v7.x) // &v7::Alloc &x::FieldAddr
+
+ var v8 [1]int // &v8::Alloc
+ v8[0] = 0 // &v8::Alloc
+ print(v8[:]) // &v8::Alloc
+ _ = v8[0] // &v8::Alloc
+ _ = v8[:][0] // &v8::Alloc
+ v8ptr := &v8 // v8ptr::Alloc &v8::Alloc
+ _ = v8ptr[0] // v8ptr::Alloc
+ _ = *v8ptr // v8ptr::Alloc
+
+ v8a := make([]int, 1) // v8a::MakeSlice
+ v8a[0] = 0 // v8a::MakeSlice
+ print(v8a[:]) // v8a::MakeSlice
+
+ v9 := S{} // &v9::Alloc
+
+ v10 := &v9 // v10::Alloc &v9::Alloc
+ _ = v10 // v10::Alloc
+
+ var v11 *J = nil // v11::Const
+ v11.method() // v11::Const
+
+ var v12 J // &v12::Alloc
+ v12.method() // &v12::Alloc (implicitly address-taken)
+
+ // NB, in the following, 'method' resolves to the *types.Func
+ // of (*J).method, so it doesn't help us locate the specific
+ // ssa.Values here: a bound-method closure and a promotion
+ // wrapper.
+ _ = v11.method // v11::Const
+ _ = (*struct{ J }).method // J::nil
+
+ // These vars are not optimised away.
+ if false {
+ v13 := 0 // v13::Const
+ println(v13) // v13::Const
+ }
+
+ switch x := 1; x { // x::Const
+ case v0: // v0::Phi
+ }
+
+ for k, v := range m { // k::Extract v::Extract m::MakeMap
+ _ = k // k::Extract
+ v++ // v::BinOp
+ }
+
+ if y := 0; y > 1 { // y::Const y::Const
+ }
+
+ var i interface{} // i::Const (nil interface)
+ i = 1 // i::MakeInterface
+ switch i := i.(type) { // i::MakeInterface i::MakeInterface
+ case int:
+ println(i) // i::Extract
+ }
+
+ ch := make(chan int) // ch::MakeChan
+ select {
+ case x := <-ch: // x::UnOp (receive) ch::MakeChan
+ _ = x // x::UnOp
+ }
+
+ // .Op is an inter-package FieldVal-selection.
+ var err os.PathError // &err::Alloc
+ _ = err.Op // &err::Alloc &Op::FieldAddr
+ _ = &err.Op // &err::Alloc &Op::FieldAddr
+
+ // Exercise corner-cases of lvalues vs rvalues.
+ // (Guessing IsAddr from the 'pointerness' won't cut it here.)
+ type N *N
+ var n N // n::Const
+ n1 := n // n1::Const n::Const
+ n2 := &n1 // n2::Alloc &n1::Alloc
+ n3 := *n2 // n3::UnOp n2::Alloc
+ n4 := **n3 // n4::UnOp n3::UnOp
+ _ = n4 // n4::UnOp
+}
diff --git a/llgo/third_party/go.tools/go/ssa/testdata/valueforexpr.go b/llgo/third_party/go.tools/go/ssa/testdata/valueforexpr.go
new file mode 100644
index 0000000000000000000000000000000000000000..70906cac4ee026de4814f3db42e33c9b64a2ee25
--- /dev/null
+++ b/llgo/third_party/go.tools/go/ssa/testdata/valueforexpr.go
@@ -0,0 +1,148 @@
+//+build ignore
+
+package main
+
+// This file is the input to TestValueForExpr in source_test.go, which
+// ensures that each expression e immediately following a /*@kind*/(x)
+// annotation, when passed to Function.ValueForExpr(e), returns a
+// non-nil Value of the same type as e and of kind 'kind'.
+
+func f(spilled, unspilled int) {
+ _ = /*@UnOp*/ (spilled)
+ _ = /*@Parameter*/ (unspilled)
+ _ = /*@*/ (1 + 2) // (constant)
+ i := 0
+ /*@Call*/ (print( /*@BinOp*/ (i + 1)))
+ ch := /*@MakeChan*/ (make(chan int))
+ /*@UnOp*/ (<-ch)
+ x := /*@UnOp*/ (<-ch)
+ _ = x
+ select {
+ case /*@Extract*/ (<-ch):
+ case x := /*@Extract*/ (<-ch):
+ _ = x
+ }
+ defer /*@Function*/ (func() {
+ })()
+ go /*@Function*/ (func() {
+ })()
+ y := 0
+ if true && /*@BinOp*/ (bool(y > 0)) {
+ y = 1
+ }
+ _ = /*@Phi*/ (y)
+ map1 := /*@MakeMap*/ (make(map[string]string))
+ _ = map1
+ _ = /*@MakeSlice*/ (make([]int, 0))
+ _ = /*@MakeClosure*/ (func() { print(spilled) })
+
+ sl := []int{}
+ _ = /*@Slice*/ (sl[:0])
+
+ _ = /*@*/ (new(int)) // optimized away
+ tmp := /*@Alloc*/ (new(int))
+ _ = tmp
+ var iface interface{}
+ _ = /*@TypeAssert*/ (iface.(int))
+ _ = /*@UnOp*/ (sl[0])
+ _ = /*@IndexAddr*/ (&sl[0])
+ _ = /*@Index*/ ([2]int{}[0])
+ var p *int
+ _ = /*@UnOp*/ (*p)
+
+ _ = /*@UnOp*/ (global)
+ /*@UnOp*/ (global)[""] = ""
+ /*@Global*/ (global) = map[string]string{}
+
+ var local t
+ /*UnOp*/ (local.x) = 1
+
+ // Exercise corner-cases of lvalues vs rvalues.
+ type N *N
+ var n N
+ /*@UnOp*/ (n) = /*@UnOp*/ (n)
+ /*@ChangeType*/ (n) = /*@Alloc*/ (&n)
+ /*@UnOp*/ (n) = /*@UnOp*/ (*n)
+ /*@UnOp*/ (n) = /*@UnOp*/ (**n)
+}
+
+func complit() {
+ // Composite literals.
+ // We get different results for
+ // - composite literal as value (e.g. operand to print)
+ // - composite literal initializer for addressable value
+ // - composite literal value assigned to blank var
+
+ // 1. Slices
+ print( /*@Slice*/ ([]int{}))
+ print( /*@Alloc*/ (&[]int{}))
+ print(& /*@Alloc*/ ([]int{}))
+
+ sl1 := /*@Slice*/ ([]int{})
+ sl2 := /*@Alloc*/ (&[]int{})
+ sl3 := & /*@Alloc*/ ([]int{})
+ _, _, _ = sl1, sl2, sl3
+
+ _ = /*@Slice*/ ([]int{})
+ _ = /*@*/ (& /*@Slice*/ ([]int{})) // & optimized away
+ _ = & /*@Slice*/ ([]int{})
+
+ // 2. Arrays
+ print( /*@UnOp*/ ([1]int{}))
+ print( /*@Alloc*/ (&[1]int{}))
+ print(& /*@Alloc*/ ([1]int{}))
+
+ arr1 := /*@Alloc*/ ([1]int{})
+ arr2 := /*@Alloc*/ (&[1]int{})
+ arr3 := & /*@Alloc*/ ([1]int{})
+ _, _, _ = arr1, arr2, arr3
+
+ _ = /*@UnOp*/ ([1]int{})
+ _ = /*@Alloc*/ (& /*@Alloc*/ ([1]int{})) // & optimized away
+ _ = & /*@Alloc*/ ([1]int{})
+
+ // 3. Maps
+ type M map[int]int
+ print( /*@MakeMap*/ (M{}))
+ print( /*@Alloc*/ (&M{}))
+ print(& /*@Alloc*/ (M{}))
+
+ m1 := /*@MakeMap*/ (M{})
+ m2 := /*@Alloc*/ (&M{})
+ m3 := & /*@Alloc*/ (M{})
+ _, _, _ = m1, m2, m3
+
+ _ = /*@MakeMap*/ (M{})
+ _ = /*@*/ (& /*@MakeMap*/ (M{})) // & optimized away
+ _ = & /*@MakeMap*/ (M{})
+
+ // 4. Structs
+ print( /*@UnOp*/ (struct{}{}))
+ print( /*@Alloc*/ (&struct{}{}))
+ print(& /*@Alloc*/ (struct{}{}))
+
+ s1 := /*@Alloc*/ (struct{}{})
+ s2 := /*@Alloc*/ (&struct{}{})
+ s3 := & /*@Alloc*/ (struct{}{})
+ _, _, _ = s1, s2, s3
+
+ _ = /*@UnOp*/ (struct{}{})
+ _ = /*@Alloc*/ (& /*@Alloc*/ (struct{}{}))
+ _ = & /*@Alloc*/ (struct{}{})
+}
+
+type t struct{ x int }
+
+// Ensure we can locate methods of named types.
+func (t) f(param int) {
+ _ = /*@Parameter*/ (param)
+}
+
+// Ensure we can locate init functions.
+func init() {
+ m := /*@MakeMap*/ (make(map[string]string))
+ _ = m
+}
+
+// Ensure we can locate variables in initializer expressions.
+var global = /*@MakeMap*/ (make(map[string]string))
diff --git a/llgo/third_party/go.tools/go/ssa/testmain.go b/llgo/third_party/go.tools/go/ssa/testmain.go
new file mode 100644
index 0000000000000000000000000000000000000000..e19c27f73c4c2c71fd212f33edf81880b6f3f5e4
--- /dev/null
+++ b/llgo/third_party/go.tools/go/ssa/testmain.go
@@ -0,0 +1,286 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// CreateTestMainPackage synthesizes a main package that runs all the
+// tests of the supplied packages.
+// It is closely coupled to $GOROOT/src/cmd/go/test.go and $GOROOT/src/testing.
+
+import (
+ "go/ast"
+ "go/token"
+ "os"
+ "strings"
+
+ "llvm.org/llgo/third_party/go.tools/go/types"
+)
+
+// FindTests returns the list of packages that define at least one Test,
+// Example or Benchmark function (as defined by "go test"), and the
+// lists of all such functions.
+//
+func FindTests(pkgs []*Package) (testpkgs []*Package, tests, benchmarks, examples []*Function) {
+ if len(pkgs) == 0 {
+ return
+ }
+ prog := pkgs[0].Prog
+
+ // The first two of these may be nil: if the program doesn't import "testing",
+ // it can't contain any tests, but it may yet contain Examples.
+ var testSig *types.Signature // func(*testing.T)
+ var benchmarkSig *types.Signature // func(*testing.B)
+ var exampleSig = types.NewSignature(nil, nil, nil, nil, false) // func()
+
+ // Obtain the types from the parameters of testing.Main().
+ if testingPkg := prog.ImportedPackage("testing"); testingPkg != nil {
+ params := testingPkg.Func("Main").Signature.Params()
+ testSig = funcField(params.At(1).Type())
+ benchmarkSig = funcField(params.At(2).Type())
+ }
+
+ seen := make(map[*Package]bool)
+ for _, pkg := range pkgs {
+ if pkg.Prog != prog {
+ panic("wrong Program")
+ }
+
+ // TODO(adonovan): use a stable order, e.g. lexical.
+ for _, mem := range pkg.Members {
+ if f, ok := mem.(*Function); ok &&
+ ast.IsExported(f.Name()) &&
+ strings.HasSuffix(prog.Fset.Position(f.Pos()).Filename, "_test.go") {
+
+ switch {
+ case testSig != nil && isTestSig(f, "Test", testSig):
+ tests = append(tests, f)
+ case benchmarkSig != nil && isTestSig(f, "Benchmark", benchmarkSig):
+ benchmarks = append(benchmarks, f)
+ case isTestSig(f, "Example", exampleSig):
+ examples = append(examples, f)
+ default:
+ continue
+ }
+
+ if !seen[pkg] {
+ seen[pkg] = true
+ testpkgs = append(testpkgs, pkg)
+ }
+ }
+ }
+ }
+ return
+}
+
+// Like isTest, but checks the signature too.
+func isTestSig(f *Function, prefix string, sig *types.Signature) bool {
+ return isTest(f.Name(), prefix) && types.Identical(f.Signature, sig)
+}
+
+// If non-nil, testMainStartBodyHook is called immediately after
+// startBody for main.init and main.main, making it easy for users to
+// add custom imports and initialization steps for proprietary build
+// systems that don't exactly follow 'go test' conventions.
+var testMainStartBodyHook func(*Function)
+
+// CreateTestMainPackage creates and returns a synthetic "main"
+// package that runs all the tests of the supplied packages, similar
+// to the one that would be created by the 'go test' tool.
+//
+// It returns nil if the program contains no tests.
+//
+func (prog *Program) CreateTestMainPackage(pkgs ...*Package) *Package {
+ pkgs, tests, benchmarks, examples := FindTests(pkgs)
+ if len(pkgs) == 0 {
+ return nil
+ }
+
+ testmain := &Package{
+ Prog: prog,
+ Members: make(map[string]Member),
+ values: make(map[types.Object]Value),
+ Object: types.NewPackage("testmain", "testmain"),
+ }
+
+ // Build package's init function.
+ init := &Function{
+ name: "init",
+ Signature: new(types.Signature),
+ Synthetic: "package initializer",
+ Pkg: testmain,
+ Prog: prog,
+ }
+ init.startBody()
+
+ if testMainStartBodyHook != nil {
+ testMainStartBodyHook(init)
+ }
+
+ // Initialize packages to test.
+ for _, pkg := range pkgs {
+ var v Call
+ v.Call.Value = pkg.init
+ v.setType(types.NewTuple())
+ init.emit(&v)
+ }
+ init.emit(new(Return))
+ init.finishBody()
+ testmain.init = init
+ testmain.Object.MarkComplete()
+ testmain.Members[init.name] = init
+
+ main := &Function{
+ name: "main",
+ Signature: new(types.Signature),
+ Synthetic: "test main function",
+ Prog: prog,
+ Pkg: testmain,
+ }
+
+ main.startBody()
+
+ if testMainStartBodyHook != nil {
+ testMainStartBodyHook(main)
+ }
+
+ if testingPkg := prog.ImportedPackage("testing"); testingPkg != nil {
+ testingMain := testingPkg.Func("Main")
+ testingMainParams := testingMain.Signature.Params()
+
+ // The generated code is as if compiled from this:
+ //
+ // func main() {
+ // match := func(_, _ string) (bool, error) { return true, nil }
+ // tests := []testing.InternalTest{{"TestFoo", TestFoo}, ...}
+ // benchmarks := []testing.InternalBenchmark{...}
+ // examples := []testing.InternalExample{...}
+ // testing.Main(match, tests, benchmarks, examples)
+ // }
+
+ matcher := &Function{
+ name: "matcher",
+ Signature: testingMainParams.At(0).Type().(*types.Signature),
+ Synthetic: "test matcher predicate",
+ parent: main,
+ Pkg: testmain,
+ Prog: prog,
+ }
+ main.AnonFuncs = append(main.AnonFuncs, matcher)
+ matcher.startBody()
+ matcher.emit(&Return{Results: []Value{vTrue, nilConst(types.Universe.Lookup("error").Type())}})
+ matcher.finishBody()
+
+ // Emit call: testing.Main(matcher, tests, benchmarks, examples).
+ var c Call
+ c.Call.Value = testingMain
+ c.Call.Args = []Value{
+ matcher,
+ testMainSlice(main, tests, testingMainParams.At(1).Type()),
+ testMainSlice(main, benchmarks, testingMainParams.At(2).Type()),
+ testMainSlice(main, examples, testingMainParams.At(3).Type()),
+ }
+ emitTailCall(main, &c)
+ } else {
+ // The program does not import "testing", but FindTests
+ // returned non-nil, which must mean there were Examples
+ // but no Tests or Benchmarks.
+ // We'll simply call them from testmain.main; this will
+ // ensure they don't panic, but will not check any
+ // "Output:" comments.
+ for _, eg := range examples {
+ var c Call
+ c.Call.Value = eg
+ c.setType(types.NewTuple())
+ main.emit(&c)
+ }
+ main.emit(&Return{})
+ main.currentBlock = nil
+ }
+
+ main.finishBody()
+
+ testmain.Members["main"] = main
+
+ if prog.mode&PrintPackages != 0 {
+ printMu.Lock()
+ testmain.WriteTo(os.Stdout)
+ printMu.Unlock()
+ }
+
+ if prog.mode&SanityCheckFunctions != 0 {
+ sanityCheckPackage(testmain)
+ }
+
+ prog.packages[testmain.Object] = testmain
+
+ return testmain
+}
+
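+// Example (sketch): a typical driver, assuming an ssa.Program prog whose
+// packages have already been created, might synthesize and build a test
+// main package roughly like this:
+//
+//    pkgs := prog.AllPackages()
+//    if main := prog.CreateTestMainPackage(pkgs...); main != nil {
+//        prog.Build() // also builds the synthetic "testmain" package
+//        // main can now be handed to an interpreter or code generator
+//    }
+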
+// testMainSlice emits to fn code to construct a slice of type slice
+// (one of []testing.Internal{Test,Benchmark,Example}) for all
+// functions in testfuncs. It returns the slice value.
+//
+func testMainSlice(fn *Function, testfuncs []*Function, slice types.Type) Value {
+ if testfuncs == nil {
+ return nilConst(slice)
+ }
+
+ tElem := slice.(*types.Slice).Elem()
+ tPtrString := types.NewPointer(tString)
+ tPtrElem := types.NewPointer(tElem)
+ tPtrFunc := types.NewPointer(funcField(slice))
+
+ // Emit: array = new [n]testing.InternalTest
+ tArray := types.NewArray(tElem, int64(len(testfuncs)))
+ array := emitNew(fn, tArray, token.NoPos)
+ array.Comment = "test main"
+ for i, testfunc := range testfuncs {
+ // Emit: pitem = &array[i]
+ ia := &IndexAddr{X: array, Index: intConst(int64(i))}
+ ia.setType(tPtrElem)
+ pitem := fn.emit(ia)
+
+ // Emit: pname = &pitem.Name
+ fa := &FieldAddr{X: pitem, Field: 0} // .Name
+ fa.setType(tPtrString)
+ pname := fn.emit(fa)
+
+ // Emit: *pname = "testfunc"
+ emitStore(fn, pname, stringConst(testfunc.Name()))
+
+ // Emit: pfunc = &pitem.F
+ fa = &FieldAddr{X: pitem, Field: 1} // .F
+ fa.setType(tPtrFunc)
+ pfunc := fn.emit(fa)
+
+ // Emit: *pfunc = testfunc
+ emitStore(fn, pfunc, testfunc)
+ }
+
+ // Emit: slice array[:]
+ sl := &Slice{X: array}
+ sl.setType(slice)
+ return fn.emit(sl)
+}
+
+// funcField returns the function type, given the type of one of the three
+// slice parameters of testing.Main.
+func funcField(slice types.Type) *types.Signature {
+ return slice.(*types.Slice).Elem().Underlying().(*types.Struct).Field(1).Type().(*types.Signature)
+}
+
+// Plundered from $GOROOT/src/cmd/go/test.go
+
+// isTest tells whether name looks like a test (or benchmark, according to prefix).
+// It is a Test (say) if there is a character after Test that is not a lower-case letter.
+// We don't want TesticularCancer.
+func isTest(name, prefix string) bool {
+ if !strings.HasPrefix(name, prefix) {
+ return false
+ }
+ if len(name) == len(prefix) { // "Test" is ok
+ return true
+ }
+ return ast.IsExported(name[len(prefix):])
+}
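+
+// For example, isTest("Test", "Test") and isTest("TestFoo", "Test") are true,
+// while isTest("testFoo", "Test") (wrong case) and isTest("Testify", "Test")
+// (lower-case letter immediately after the prefix) are false.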
diff --git a/llgo/third_party/go.tools/go/ssa/testmain_test.go b/llgo/third_party/go.tools/go/ssa/testmain_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..41b1df692e5a38bde46eea49ebfe0e8b4341ff8f
--- /dev/null
+++ b/llgo/third_party/go.tools/go/ssa/testmain_test.go
@@ -0,0 +1,122 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa_test
+
+// Tests of FindTests. CreateTestMainPackage is tested via the interpreter.
+// TODO(adonovan): test the 'pkgs' result from FindTests.
+
+import (
+ "fmt"
+ "sort"
+ "testing"
+
+ "llvm.org/llgo/third_party/go.tools/go/loader"
+ "llvm.org/llgo/third_party/go.tools/go/ssa"
+)
+
+func create(t *testing.T, content string) []*ssa.Package {
+ var conf loader.Config
+ f, err := conf.ParseFile("foo_test.go", content)
+ if err != nil {
+ t.Fatal(err)
+ }
+ conf.CreateFromFiles("foo", f)
+
+ iprog, err := conf.Load()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // We needn't call Build.
+ return ssa.Create(iprog, ssa.SanityCheckFunctions).AllPackages()
+}
+
+func TestFindTests(t *testing.T) {
+ test := `
+package foo
+
+import "testing"
+
+type T int
+
+// Tests:
+func Test(t *testing.T) {}
+func TestA(t *testing.T) {}
+func TestB(t *testing.T) {}
+
+// Not tests:
+func testC(t *testing.T) {}
+func TestD() {}
+func testE(t *testing.T) int { return 0 }
+func (T) Test(t *testing.T) {}
+
+// Benchmarks:
+func Benchmark(*testing.B) {}
+func BenchmarkA(b *testing.B) {}
+func BenchmarkB(*testing.B) {}
+
+// Not benchmarks:
+func benchmarkC(t *testing.T) {}
+func BenchmarkD() {}
+func benchmarkE(t *testing.T) int { return 0 }
+func (T) Benchmark(t *testing.T) {}
+
+// Examples:
+func Example() {}
+func ExampleA() {}
+
+// Not examples:
+func exampleC() {}
+func ExampleD(t *testing.T) {}
+func exampleE() int { return 0 }
+func (T) Example() {}
+`
+ pkgs := create(t, test)
+ _, tests, benchmarks, examples := ssa.FindTests(pkgs)
+
+ sort.Sort(funcsByPos(tests))
+ if got, want := fmt.Sprint(tests), "[foo.Test foo.TestA foo.TestB]"; got != want {
+ t.Errorf("FindTests.tests = %s, want %s", got, want)
+ }
+
+ sort.Sort(funcsByPos(benchmarks))
+ if got, want := fmt.Sprint(benchmarks), "[foo.Benchmark foo.BenchmarkA foo.BenchmarkB]"; got != want {
+ t.Errorf("FindTests.benchmarks = %s, want %s", got, want)
+ }
+
+ sort.Sort(funcsByPos(examples))
+ if got, want := fmt.Sprint(examples), "[foo.Example foo.ExampleA]"; got != want {
+ t.Errorf("FindTests examples = %s, want %s", got, want)
+ }
+}
+
+func TestFindTestsTesting(t *testing.T) {
+ test := `
+package foo
+
+// foo does not import "testing", but defines Examples.
+
+func Example() {}
+func ExampleA() {}
+`
+ pkgs := create(t, test)
+ _, tests, benchmarks, examples := ssa.FindTests(pkgs)
+ if len(tests) > 0 {
+ t.Errorf("FindTests.tests = %s, want none", tests)
+ }
+ if len(benchmarks) > 0 {
+ t.Errorf("FindTests.benchmarks = %s, want none", benchmarks)
+ }
+ sort.Sort(funcsByPos(examples))
+ if got, want := fmt.Sprint(examples), "[foo.Example foo.ExampleA]"; got != want {
+ t.Errorf("FindTests examples = %s, want %s", got, want)
+ }
+}
+
+type funcsByPos []*ssa.Function
+
+func (p funcsByPos) Len() int { return len(p) }
+func (p funcsByPos) Less(i, j int) bool { return p[i].Pos() < p[j].Pos() }
+func (p funcsByPos) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
diff --git a/llgo/third_party/go.tools/go/ssa/util.go b/llgo/third_party/go.tools/go/ssa/util.go
new file mode 100644
index 0000000000000000000000000000000000000000..d74fa297fb77a38b611c94059f776fa4cb1f583b
--- /dev/null
+++ b/llgo/third_party/go.tools/go/ssa/util.go
@@ -0,0 +1,136 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// This file defines a number of miscellaneous utility functions.
+
+import (
+ "fmt"
+ "go/ast"
+ "go/token"
+ "io"
+ "os"
+
+ "llvm.org/llgo/third_party/go.tools/go/types"
+)
+
+func unreachable() {
+ panic("unreachable")
+}
+
+//// AST utilities
+
+// unparen returns e with any enclosing parentheses stripped.
+func unparen(e ast.Expr) ast.Expr {
+ for {
+ p, ok := e.(*ast.ParenExpr)
+ if !ok {
+ break
+ }
+ e = p.X
+ }
+ return e
+}
+
+// isBlankIdent returns true iff e is an Ident with name "_".
+// Blank identifiers have no associated types.Object, and thus no type.
+//
+func isBlankIdent(e ast.Expr) bool {
+ id, ok := e.(*ast.Ident)
+ return ok && id.Name == "_"
+}
+
+//// Type utilities. Some of these belong in go/types.
+
+// isPointer returns true for types whose underlying type is a pointer.
+func isPointer(typ types.Type) bool {
+ _, ok := typ.Underlying().(*types.Pointer)
+ return ok
+}
+
+// isInterface reports whether T's underlying type is an interface.
+func isInterface(T types.Type) bool {
+ _, ok := T.Underlying().(*types.Interface)
+ return ok
+}
+
+// deref returns a pointer's element type; otherwise it returns typ.
+func deref(typ types.Type) types.Type {
+ if p, ok := typ.Underlying().(*types.Pointer); ok {
+ return p.Elem()
+ }
+ return typ
+}
+
+// recvType returns the receiver type of method obj.
+func recvType(obj *types.Func) types.Type {
+ return obj.Type().(*types.Signature).Recv().Type()
+}
+
+// DefaultType returns the default "typed" type for an "untyped" type;
+// it returns the incoming type for all other types. The default type
+// for untyped nil is untyped nil.
+//
+// Exported to ssa/interp.
+//
+// TODO(gri): this is a copy of go/types.defaultType; export that function.
+//
+func DefaultType(typ types.Type) types.Type {
+ if t, ok := typ.(*types.Basic); ok {
+ k := t.Kind()
+ switch k {
+ case types.UntypedBool:
+ k = types.Bool
+ case types.UntypedInt:
+ k = types.Int
+ case types.UntypedRune:
+ k = types.Rune
+ case types.UntypedFloat:
+ k = types.Float64
+ case types.UntypedComplex:
+ k = types.Complex128
+ case types.UntypedString:
+ k = types.String
+ }
+ typ = types.Typ[k]
+ }
+ return typ
+}
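+
+// For example, DefaultType(types.Typ[types.UntypedInt]) is types.Typ[types.Int]
+// and DefaultType(types.Typ[types.UntypedFloat]) is types.Typ[types.Float64],
+// while an already typed type such as types.Typ[types.Int32] is returned
+// unchanged.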
+
+// logStack prints the formatted "start" message to stderr and
+// returns a closure that prints the corresponding "end" message.
+// Call using 'defer logStack(...)()' to show builder stack on panic.
+// Don't forget trailing parens!
+//
+func logStack(format string, args ...interface{}) func() {
+ msg := fmt.Sprintf(format, args...)
+ io.WriteString(os.Stderr, msg)
+ io.WriteString(os.Stderr, "\n")
+ return func() {
+ io.WriteString(os.Stderr, msg)
+ io.WriteString(os.Stderr, " end\n")
+ }
+}
+
+// newVar creates a 'var' for use in a types.Tuple.
+func newVar(name string, typ types.Type) *types.Var {
+ return types.NewParam(token.NoPos, nil, name, typ)
+}
+
+// anonVar creates an anonymous 'var' for use in a types.Tuple.
+func anonVar(typ types.Type) *types.Var {
+ return newVar("", typ)
+}
+
+var lenResults = types.NewTuple(anonVar(tInt))
+
+// makeLen returns the len builtin specialized to type func(T)int.
+func makeLen(T types.Type) *Builtin {
+ lenParams := types.NewTuple(anonVar(T))
+ return &Builtin{
+ name: "len",
+ sig: types.NewSignature(nil, nil, lenParams, lenResults, false),
+ }
+}
diff --git a/llgo/third_party/go.tools/go/ssa/wrappers.go b/llgo/third_party/go.tools/go/ssa/wrappers.go
new file mode 100644
index 0000000000000000000000000000000000000000..09813be5b41718f66079256f2bf7ddaa9e69ef7c
--- /dev/null
+++ b/llgo/third_party/go.tools/go/ssa/wrappers.go
@@ -0,0 +1,287 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// This file defines synthesis of Functions that delegate to declared
+// methods, which come in three kinds:
+//
+// (1) wrappers: methods that wrap declared methods, performing
+// implicit pointer indirections and embedded field selections.
+//
+// (2) thunks: funcs that wrap declared methods. Like wrappers,
+// thunks perform indirections and field selections. The thunk's
+// first parameter is used as the receiver for the method call.
+//
+// (3) bounds: funcs that wrap declared methods. The bound's sole
+// free variable, supplied by a closure, is used as the receiver
+// for the method call. No indirections or field selections are
+// performed since they can be done before the call.
+
+import (
+ "fmt"
+
+ "llvm.org/llgo/third_party/go.tools/go/types"
+)
+
+// -- wrappers -----------------------------------------------------------
+
+// makeWrapper returns a synthetic method that delegates to the
+// declared method denoted by meth.Obj(), first performing any
+// necessary pointer indirections or field selections implied by meth.
+//
+// The resulting method's receiver type is meth.Recv().
+//
+// This function is versatile but quite subtle! Consider the
+// following axes of variation when making changes:
+// - optional receiver indirection
+// - optional implicit field selections
+// - meth.Obj() may denote a concrete or an interface method
+// - the result may be a thunk or a wrapper.
+//
+// EXCLUSIVE_LOCKS_REQUIRED(prog.methodsMu)
+//
+func makeWrapper(prog *Program, sel *types.Selection) *Function {
+ obj := sel.Obj().(*types.Func) // the declared function
+ sig := sel.Type().(*types.Signature) // type of this wrapper
+
+ var recv *types.Var // wrapper's receiver or thunk's params[0]
+ name := obj.Name()
+ var description string
+ var start int // first regular param
+ if sel.Kind() == types.MethodExpr {
+ name += "$thunk"
+ description = "thunk"
+ recv = sig.Params().At(0)
+ start = 1
+ } else {
+ description = "wrapper"
+ recv = sig.Recv()
+ }
+
+ description = fmt.Sprintf("%s for %s", description, sel.Obj())
+ if prog.mode&LogSource != 0 {
+ defer logStack("make %s to (%s)", description, recv.Type())()
+ }
+ fn := &Function{
+ name: name,
+ method: sel,
+ object: obj,
+ Signature: sig,
+ Synthetic: description,
+ Prog: prog,
+ pos: obj.Pos(),
+ }
+ fn.startBody()
+ fn.addSpilledParam(recv)
+ createParams(fn, start)
+
+ indices := sel.Index()
+
+ var v Value = fn.Locals[0] // spilled receiver
+ if isPointer(sel.Recv()) {
+ v = emitLoad(fn, v)
+
+ // For simple indirection wrappers, perform an informative nil-check:
+ // "value method (T).f called using nil *T pointer"
+ if len(indices) == 1 && !isPointer(recvType(obj)) {
+ var c Call
+ c.Call.Value = &Builtin{
+ name: "ssa:wrapnilchk",
+ sig: types.NewSignature(nil, nil,
+ types.NewTuple(anonVar(sel.Recv()), anonVar(tString), anonVar(tString)),
+ types.NewTuple(anonVar(sel.Recv())), false),
+ }
+ c.Call.Args = []Value{
+ v,
+ stringConst(deref(sel.Recv()).String()),
+ stringConst(sel.Obj().Name()),
+ }
+ c.setType(v.Type())
+ v = fn.emit(&c)
+ }
+ }
+
+ // Invariant: v is a pointer, either
+ // value of *A receiver param, or
+ // address of A spilled receiver.
+
+ // We use pointer arithmetic (FieldAddr possibly followed by
+ // Load) in preference to value extraction (Field possibly
+ // preceded by Load).
+
+ v = emitImplicitSelections(fn, v, indices[:len(indices)-1])
+
+ // Invariant: v is a pointer, either
+ // value of implicit *C field, or
+ // address of implicit C field.
+
+ var c Call
+ if r := recvType(obj); !isInterface(r) { // concrete method
+ if !isPointer(r) {
+ v = emitLoad(fn, v)
+ }
+ c.Call.Value = prog.declaredFunc(obj)
+ c.Call.Args = append(c.Call.Args, v)
+ } else {
+ c.Call.Method = obj
+ c.Call.Value = emitLoad(fn, v)
+ }
+ for _, arg := range fn.Params[1:] {
+ c.Call.Args = append(c.Call.Args, arg)
+ }
+ emitTailCall(fn, &c)
+ fn.finishBody()
+ return fn
+}
+
+// createParams creates parameters for wrapper method fn based on its
+// Signature.Params, which do not include the receiver.
+// start is the index of the first regular parameter to use.
+//
+func createParams(fn *Function, start int) {
+ var last *Parameter
+ tparams := fn.Signature.Params()
+ for i, n := start, tparams.Len(); i < n; i++ {
+ last = fn.addParamObj(tparams.At(i))
+ }
+ if fn.Signature.Variadic() {
+ last.typ = types.NewSlice(last.typ)
+ }
+}
+
+// -- bounds -----------------------------------------------------------
+
+// makeBound returns a bound method wrapper (or "bound"), a synthetic
+// function that delegates to a concrete or interface method denoted
+// by obj. The resulting function has no receiver, but has one free
+// variable which will be used as the method's receiver in the
+// tail-call.
+//
+// Use MakeClosure with such a wrapper to construct a bound method
+// closure. e.g.:
+//
+// type T int or: type T interface { meth() }
+// func (t T) meth()
+// var t T
+// f := t.meth
+// f() // calls t.meth()
+//
+// f is a closure of a synthetic wrapper defined as if by:
+//
+// f := func() { return t.meth() }
+//
+// Unlike makeWrapper, makeBound need perform no indirection or field
+// selections because that can be done before the closure is
+// constructed.
+//
+// EXCLUSIVE_LOCKS_ACQUIRED(meth.Prog.methodsMu)
+//
+func makeBound(prog *Program, obj *types.Func) *Function {
+ prog.methodsMu.Lock()
+ defer prog.methodsMu.Unlock()
+ fn, ok := prog.bounds[obj]
+ if !ok {
+ description := fmt.Sprintf("bound method wrapper for %s", obj)
+ if prog.mode&LogSource != 0 {
+ defer logStack("%s", description)()
+ }
+ fn = &Function{
+ name: obj.Name() + "$bound",
+ object: obj,
+ Signature: changeRecv(obj.Type().(*types.Signature), nil), // drop receiver
+ Synthetic: description,
+ Prog: prog,
+ pos: obj.Pos(),
+ }
+
+ fv := &FreeVar{name: "recv", typ: recvType(obj), parent: fn}
+ fn.FreeVars = []*FreeVar{fv}
+ fn.startBody()
+ createParams(fn, 0)
+ var c Call
+
+ if !isInterface(recvType(obj)) { // concrete
+ c.Call.Value = prog.declaredFunc(obj)
+ c.Call.Args = []Value{fv}
+ } else {
+ c.Call.Value = fv
+ c.Call.Method = obj
+ }
+ for _, arg := range fn.Params {
+ c.Call.Args = append(c.Call.Args, arg)
+ }
+ emitTailCall(fn, &c)
+ fn.finishBody()
+
+ prog.bounds[obj] = fn
+ }
+ return fn
+}
+
+// -- thunks -----------------------------------------------------------
+
+// makeThunk returns a thunk, a synthetic function that delegates to a
+// concrete or interface method denoted by sel.Obj(). The resulting
+// function has no receiver, but has an additional (first) regular
+// parameter.
+//
+// Precondition: sel.Kind() == types.MethodExpr.
+//
+// type T int or: type T interface { meth() }
+// func (t T) meth()
+// f := T.meth
+// var t T
+// f(t) // calls t.meth()
+//
+// f is a synthetic wrapper defined as if by:
+//
+// f := func(t T) { return t.meth() }
+//
+// TODO(adonovan): opt: currently the stub is created even when used
+// directly in a function call: C.f(i, 0). This is less efficient
+// than inlining the stub.
+//
+// EXCLUSIVE_LOCKS_ACQUIRED(meth.Prog.methodsMu)
+//
+func makeThunk(prog *Program, sel *types.Selection) *Function {
+ if sel.Kind() != types.MethodExpr {
+ panic(sel)
+ }
+
+ // TODO(adonovan): opt: canonicalize the recv Type to avoid
+// constructing unnecessary duplicate thunks.
+ key := selectionKey{
+ kind: sel.Kind(),
+ recv: sel.Recv(),
+ obj: sel.Obj(),
+ index: fmt.Sprint(sel.Index()),
+ indirect: sel.Indirect(),
+ }
+
+ prog.methodsMu.Lock()
+ defer prog.methodsMu.Unlock()
+ fn, ok := prog.thunks[key]
+ if !ok {
+ fn = makeWrapper(prog, sel)
+ if fn.Signature.Recv() != nil {
+ panic(fn) // unexpected receiver
+ }
+ prog.thunks[key] = fn
+ }
+ return fn
+}
+
+func changeRecv(s *types.Signature, recv *types.Var) *types.Signature {
+ return types.NewSignature(nil, recv, s.Params(), s.Results(), s.Variadic())
+}
+
+// selectionKey is like types.Selection but a usable map key.
+type selectionKey struct {
+ kind types.SelectionKind
+ recv types.Type
+ obj types.Object
+ index string
+ indirect bool
+}
diff --git a/llgo/third_party/go.tools/go/types/api.go b/llgo/third_party/go.tools/go/types/api.go
new file mode 100644
index 0000000000000000000000000000000000000000..9ae65023e95dac1407d222d3a585e4b139d056ec
--- /dev/null
+++ b/llgo/third_party/go.tools/go/types/api.go
@@ -0,0 +1,361 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package types declares the data types and implements
+// the algorithms for type-checking of Go packages.
+// Use Check and Config.Check to invoke the type-checker.
+//
+// Type-checking consists of several interdependent phases:
+//
+// Name resolution maps each identifier (ast.Ident) in the program to the
+// language object (Object) it denotes.
+// Use Info.{Defs,Uses,Implicits} for the results of name resolution.
+//
+// Constant folding computes the exact constant value (exact.Value) for
+// every expression (ast.Expr) that is a compile-time constant.
+// Use Info.Types[expr].Value for the results of constant folding.
+//
+// Type inference computes the type (Type) of every expression (ast.Expr)
+// and checks for compliance with the language specification.
+// Use Info.Types[expr].Type for the results of type inference.
+//
+package types
+
+import (
+ "bytes"
+ "fmt"
+ "go/ast"
+ "go/token"
+
+ "llvm.org/llgo/third_party/go.tools/go/exact"
+)
+
+// Check type-checks a package and returns the resulting complete package
+// object, or a nil package and the first error. The package is specified
+// by a list of *ast.Files and corresponding file set, and the import path
+// the package is identified with. The clean path must not be empty or dot (".").
+//
+// For more control over type-checking and results, use Config.Check.
+func Check(path string, fset *token.FileSet, files []*ast.File) (*Package, error) {
+ var conf Config
+ pkg, err := conf.Check(path, fset, files, nil)
+ if err != nil {
+ return nil, err
+ }
+ return pkg, nil
+}
+
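+// Example (sketch): type-check a single parsed file via this convenience
+// entry point, assuming fset and f come from go/parser and that DefaultImport
+// has been initialized (for instance by importing the gcimporter package):
+//
+//    pkg, err := Check("mypkg", fset, []*ast.File{f})
+//    if err != nil {
+//        log.Fatal(err)
+//    }
+//    fmt.Println("type-checked package", pkg.Name())
+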
+// An Error describes a type-checking error; it implements the error interface.
+// A "soft" error is an error that still permits a valid interpretation of a
+// package (such as "unused variable"); "hard" errors may lead to unpredictable
+// behavior if ignored.
+type Error struct {
+ Fset *token.FileSet // file set for interpretation of Pos
+ Pos token.Pos // error position
+ Msg string // error message
+ Soft bool // if set, error is "soft"
+}
+
+// Error returns an error string formatted as follows:
+// filename:line:column: message
+func (err Error) Error() string {
+ return fmt.Sprintf("%s: %s", err.Fset.Position(err.Pos), err.Msg)
+}
+
+// An importer resolves import paths to Packages.
+// The imports map records packages already known,
+// indexed by package path. The type-checker
+// will invoke Import with Config.Packages.
+// An importer must determine the canonical package path and
+// check imports to see if it is already present in the map.
+// If so, the Importer can return the map entry. Otherwise,
+// the importer must load the package data for the given path
+// into a new *Package, record it in the imports map, and return
+// the package.
+// TODO(gri) Need to be clearer about requirements of completeness.
+type Importer func(map[string]*Package, string) (*Package, error)
+
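+// A minimal sketch of an Importer, assuming the client pre-populates
+// Config.Packages and only wants to resolve packages already in that map:
+//
+//    var mapImporter Importer = func(imports map[string]*Package, path string) (*Package, error) {
+//        if pkg := imports[path]; pkg != nil {
+//            return pkg, nil
+//        }
+//        return nil, fmt.Errorf("package %q not found in imports map", path)
+//    }
+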
+// A Config specifies the configuration for type checking.
+// The zero value for Config is a ready-to-use default configuration.
+type Config struct {
+ // If IgnoreFuncBodies is set, function bodies are not
+ // type-checked.
+ IgnoreFuncBodies bool
+
+ // If FakeImportC is set, `import "C"` (for packages requiring Cgo)
+ // declares an empty "C" package and errors are omitted for qualified
+ // identifiers referring to package C (which won't find an object).
+ // This feature is intended for the standard library cmd/api tool.
+ //
+ // Caution: Effects may be unpredictable due to follow-up errors.
+ // Do not use casually!
+ FakeImportC bool
+
+ // Packages is used to look up (and thus canonicalize) packages by
+ // package path. If Packages is nil, it is set to a new empty map.
+ // During type-checking, imported packages are added to the map.
+ Packages map[string]*Package
+
+ // If Error != nil, it is called with each error found
+ // during type checking; err has dynamic type Error.
+ // Secondary errors (for instance, to enumerate all types
+ // involved in an invalid recursive type declaration) have
+ // error strings that start with a '\t' character.
+ // If Error == nil, type-checking stops with the first
+ // error found.
+ Error func(err error)
+
+ // If Import != nil, it is called for each imported package.
+ // Otherwise, DefaultImport is called.
+ Import Importer
+
+ // If Sizes != nil, it provides the sizing functions for package unsafe.
+ // Otherwise &StdSizes{WordSize: 8, MaxAlign: 8} is used instead.
+ Sizes Sizes
+}
+
+// DefaultImport is the default importer invoked if Config.Import == nil.
+// The declaration:
+//
+// import _ "llvm.org/llgo/third_party/go.tools/go/gcimporter"
+//
+// in a client of go/types will initialize DefaultImport to gcimporter.Import.
+var DefaultImport Importer
+
+// Info holds result type information for a type-checked package.
+// Only the information for which a map is provided is collected.
+// If the package has type errors, the collected information may
+// be incomplete.
+type Info struct {
+ // Types maps expressions to their types, and for constant
+ // expressions, their values. Invalid expressions are omitted.
+ //
+ // For (possibly parenthesized) identifiers denoting built-in
+ // functions, the recorded signatures are call-site specific:
+ // if the call result is not a constant, the recorded type is
+ // an argument-specific signature. Otherwise, the recorded type
+ // is invalid.
+ //
+ // Identifiers on the lhs of declarations (i.e., the identifiers
+ // which are being declared) are collected in the Defs map.
+// Identifiers denoting packages are collected in the Uses map.
+ Types map[ast.Expr]TypeAndValue
+
+ // Defs maps identifiers to the objects they define (including
+ // package names, dots "." of dot-imports, and blank "_" identifiers).
+ // For identifiers that do not denote objects (e.g., the package name
+ // in package clauses, or symbolic variables t in t := x.(type) of
+ // type switch headers), the corresponding objects are nil.
+ //
+ // For an anonymous field, Defs returns the field *Var it defines.
+ //
+ // Invariant: Defs[id] == nil || Defs[id].Pos() == id.Pos()
+ Defs map[*ast.Ident]Object
+
+ // Uses maps identifiers to the objects they denote.
+ //
+ // For an anonymous field, Uses returns the *TypeName it denotes.
+ //
+ // Invariant: Uses[id].Pos() != id.Pos()
+ Uses map[*ast.Ident]Object
+
+ // Implicits maps nodes to their implicitly declared objects, if any.
+ // The following node and object types may appear:
+ //
+ // node declared object
+ //
+ // *ast.ImportSpec *PkgName for dot-imports and imports without renames
+ // *ast.CaseClause type-specific *Var for each type switch case clause (incl. default)
+ // *ast.Field anonymous struct field or parameter *Var
+ //
+ Implicits map[ast.Node]Object
+
+ // Selections maps selector expressions (excluding qualified identifiers)
+ // to their corresponding selections.
+ Selections map[*ast.SelectorExpr]*Selection
+
+ // Scopes maps ast.Nodes to the scopes they define. Package scopes are not
+ // associated with a specific node but with all files belonging to a package.
+ // Thus, the package scope can be found in the type-checked Package object.
+ // Scopes nest, with the Universe scope being the outermost scope, enclosing
+// the package scope, which contains (one or more) file scopes, which enclose
+ // function scopes which in turn enclose statement and function literal scopes.
+ // Note that even though package-level functions are declared in the package
+ // scope, the function scopes are embedded in the file scope of the file
+ // containing the function declaration.
+ //
+ // The following node types may appear in Scopes:
+ //
+ // *ast.File
+ // *ast.FuncType
+ // *ast.BlockStmt
+ // *ast.IfStmt
+ // *ast.SwitchStmt
+ // *ast.TypeSwitchStmt
+ // *ast.CaseClause
+ // *ast.CommClause
+ // *ast.ForStmt
+ // *ast.RangeStmt
+ //
+ Scopes map[ast.Node]*Scope
+
+ // InitOrder is the list of package-level initializers in the order in which
+ // they must be executed. Initializers referring to variables related by an
+ // initialization dependency appear in topological order, the others appear
+ // in source order. Variables without an initialization expression do not
+ // appear in this list.
+ InitOrder []*Initializer
+}
+
+// TypeOf returns the type of expression e, or nil if not found.
+// Precondition: the Types, Uses and Defs maps are populated.
+//
+func (info *Info) TypeOf(e ast.Expr) Type {
+ if t, ok := info.Types[e]; ok {
+ return t.Type
+ }
+ if id, _ := e.(*ast.Ident); id != nil {
+ if obj := info.ObjectOf(id); obj != nil {
+ return obj.Type()
+ }
+ }
+ return nil
+}
+
+// ObjectOf returns the object denoted by the specified id,
+// or nil if not found.
+//
+// If id is an anonymous struct field, ObjectOf returns the field (*Var)
+// it defines, not the type (*TypeName) it uses.
+//
+// Precondition: the Uses and Defs maps are populated.
+//
+func (info *Info) ObjectOf(id *ast.Ident) Object {
+ if obj, _ := info.Defs[id]; obj != nil {
+ return obj
+ }
+ return info.Uses[id]
+}
+
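+// Example (sketch): with the Types, Defs and Uses maps populated by a call
+// to Config.Check, a client might enumerate resolved identifiers like this:
+//
+//    for id, obj := range info.Uses {
+//        fmt.Printf("%s denotes %v (type %v)\n", id.Name, obj, info.TypeOf(id))
+//    }
+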
+// TypeAndValue reports the type and value (for constants)
+// of the corresponding expression.
+type TypeAndValue struct {
+ mode operandMode
+ Type Type
+ Value exact.Value
+}
+
+// TODO(gri) Consider eliminating the IsVoid predicate. Instead, report
+// "void" values as regular values but with the empty tuple type.
+
+// IsVoid reports whether the corresponding expression
+// is a function call without results.
+func (tv TypeAndValue) IsVoid() bool {
+ return tv.mode == novalue
+}
+
+// IsType reports whether the corresponding expression specifies a type.
+func (tv TypeAndValue) IsType() bool {
+ return tv.mode == typexpr
+}
+
+// IsBuiltin reports whether the corresponding expression denotes
+// a (possibly parenthesized) built-in function.
+func (tv TypeAndValue) IsBuiltin() bool {
+ return tv.mode == builtin
+}
+
+// IsValue reports whether the corresponding expression is a value.
+// Builtins are not considered values. Constant values have a non-
+// nil Value.
+func (tv TypeAndValue) IsValue() bool {
+ switch tv.mode {
+ case constant, variable, mapindex, value, commaok:
+ return true
+ }
+ return false
+}
+
+// IsNil reports whether the corresponding expression denotes the
+// predeclared value nil.
+func (tv TypeAndValue) IsNil() bool {
+ return tv.mode == value && tv.Type == Typ[UntypedNil]
+}
+
+// Addressable reports whether the corresponding expression
+// is addressable (http://golang.org/ref/spec#Address_operators).
+func (tv TypeAndValue) Addressable() bool {
+ return tv.mode == variable
+}
+
+// Assignable reports whether the corresponding expression
+// is assignable to (provided a value of the right type).
+func (tv TypeAndValue) Assignable() bool {
+ return tv.mode == variable || tv.mode == mapindex
+}
+
+// HasOk reports whether the corresponding expression may be
+// used on the lhs of a comma-ok assignment.
+func (tv TypeAndValue) HasOk() bool {
+ return tv.mode == commaok || tv.mode == mapindex
+}
+
+// An Initializer describes a package-level variable, or a list of variables in case
+// of a multi-valued initialization expression, and the corresponding initialization
+// expression.
+type Initializer struct {
+ Lhs []*Var // var Lhs = Rhs
+ Rhs ast.Expr
+}
+
+func (init *Initializer) String() string {
+ var buf bytes.Buffer
+ for i, lhs := range init.Lhs {
+ if i > 0 {
+ buf.WriteString(", ")
+ }
+ buf.WriteString(lhs.Name())
+ }
+ buf.WriteString(" = ")
+ WriteExpr(&buf, init.Rhs)
+ return buf.String()
+}
+
+// Check type-checks a package and returns the resulting package object,
+// the first error if any, and if info != nil, additional type information.
+// The package is marked as complete if no errors occurred, otherwise it is
+// incomplete. See Config.Error for controlling behavior in the presence of
+// errors.
+//
+// The package is specified by a list of *ast.Files and corresponding
+// file set, and the package path the package is identified with.
+// The clean path must not be empty or dot (".").
+func (conf *Config) Check(path string, fset *token.FileSet, files []*ast.File, info *Info) (*Package, error) {
+ pkg := NewPackage(path, "")
+ return pkg, NewChecker(conf, fset, pkg, info).Files(files)
+}
+
+// AssertableTo reports whether a value of type V can be asserted to have type T.
+func AssertableTo(V *Interface, T Type) bool {
+ m, _ := assertableTo(V, T)
+ return m == nil
+}
+
+// AssignableTo reports whether a value of type V is assignable to a variable of type T.
+func AssignableTo(V, T Type) bool {
+ x := operand{mode: value, typ: V}
+ return x.assignableTo(nil, T) // config not needed for non-constant x
+}
+
+// ConvertibleTo reports whether a value of type V is convertible to a value of type T.
+func ConvertibleTo(V, T Type) bool {
+ x := operand{mode: value, typ: V}
+ return x.convertibleTo(nil, T) // config not needed for non-constant x
+}
+
+// Implements reports whether type V implements interface T.
+func Implements(V Type, T *Interface) bool {
+ f, _ := MissingMethod(V, T, true)
+ return f == nil
+}
diff --git a/llgo/third_party/go.tools/go/types/api_test.go b/llgo/third_party/go.tools/go/types/api_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..4502b6bbbf55c82d4f32074e85205be031d7e440
--- /dev/null
+++ b/llgo/third_party/go.tools/go/types/api_test.go
@@ -0,0 +1,936 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types_test
+
+import (
+ "bytes"
+ "fmt"
+ "go/ast"
+ "go/parser"
+ "go/token"
+ "strings"
+ "testing"
+
+ _ "llvm.org/llgo/third_party/go.tools/go/gcimporter"
+ . "llvm.org/llgo/third_party/go.tools/go/types"
+)
+
+func pkgFor(path, source string, info *Info) (*Package, error) {
+ fset := token.NewFileSet()
+ f, err := parser.ParseFile(fset, path, source, 0)
+ if err != nil {
+ return nil, err
+ }
+
+ var conf Config
+ return conf.Check(f.Name.Name, fset, []*ast.File{f}, info)
+}
+
+func mustTypecheck(t *testing.T, path, source string, info *Info) string {
+ pkg, err := pkgFor(path, source, info)
+ if err != nil {
+ name := path
+ if pkg != nil {
+ name = "package " + pkg.Name()
+ }
+ t.Fatalf("%s: didn't type-check (%s)", name, err)
+ }
+ return pkg.Name()
+}
+
+func TestValuesInfo(t *testing.T) {
+ var tests = []struct {
+ src string
+ expr string // constant expression
+ typ string // constant type
+ val string // constant value
+ }{
+ {`package a0; const _ = false`, `false`, `untyped bool`, `false`},
+ {`package a1; const _ = 0`, `0`, `untyped int`, `0`},
+ {`package a2; const _ = 'A'`, `'A'`, `untyped rune`, `65`},
+ {`package a3; const _ = 0.`, `0.`, `untyped float`, `0`},
+ {`package a4; const _ = 0i`, `0i`, `untyped complex`, `0`},
+ {`package a5; const _ = "foo"`, `"foo"`, `untyped string`, `"foo"`},
+
+ {`package b0; var _ = false`, `false`, `bool`, `false`},
+ {`package b1; var _ = 0`, `0`, `int`, `0`},
+ {`package b2; var _ = 'A'`, `'A'`, `rune`, `65`},
+ {`package b3; var _ = 0.`, `0.`, `float64`, `0`},
+ {`package b4; var _ = 0i`, `0i`, `complex128`, `0`},
+ {`package b5; var _ = "foo"`, `"foo"`, `string`, `"foo"`},
+
+ {`package c0a; var _ = bool(false)`, `false`, `bool`, `false`},
+ {`package c0b; var _ = bool(false)`, `bool(false)`, `bool`, `false`},
+ {`package c0c; type T bool; var _ = T(false)`, `T(false)`, `c0c.T`, `false`},
+
+ {`package c1a; var _ = int(0)`, `0`, `int`, `0`},
+ {`package c1b; var _ = int(0)`, `int(0)`, `int`, `0`},
+ {`package c1c; type T int; var _ = T(0)`, `T(0)`, `c1c.T`, `0`},
+
+ {`package c2a; var _ = rune('A')`, `'A'`, `rune`, `65`},
+ {`package c2b; var _ = rune('A')`, `rune('A')`, `rune`, `65`},
+ {`package c2c; type T rune; var _ = T('A')`, `T('A')`, `c2c.T`, `65`},
+
+ {`package c3a; var _ = float32(0.)`, `0.`, `float32`, `0`},
+ {`package c3b; var _ = float32(0.)`, `float32(0.)`, `float32`, `0`},
+ {`package c3c; type T float32; var _ = T(0.)`, `T(0.)`, `c3c.T`, `0`},
+
+ {`package c4a; var _ = complex64(0i)`, `0i`, `complex64`, `0`},
+ {`package c4b; var _ = complex64(0i)`, `complex64(0i)`, `complex64`, `0`},
+ {`package c4c; type T complex64; var _ = T(0i)`, `T(0i)`, `c4c.T`, `0`},
+
+ {`package c5a; var _ = string("foo")`, `"foo"`, `string`, `"foo"`},
+ {`package c5b; var _ = string("foo")`, `string("foo")`, `string`, `"foo"`},
+ {`package c5c; type T string; var _ = T("foo")`, `T("foo")`, `c5c.T`, `"foo"`},
+
+ {`package d0; var _ = []byte("foo")`, `"foo"`, `string`, `"foo"`},
+ {`package d1; var _ = []byte(string("foo"))`, `"foo"`, `string`, `"foo"`},
+ {`package d2; var _ = []byte(string("foo"))`, `string("foo")`, `string`, `"foo"`},
+ {`package d3; type T []byte; var _ = T("foo")`, `"foo"`, `string`, `"foo"`},
+
+ {`package e0; const _ = float32( 1e-200)`, `float32(1e-200)`, `float32`, `0`},
+ {`package e1; const _ = float32(-1e-200)`, `float32(-1e-200)`, `float32`, `0`},
+ {`package e2; const _ = float64( 1e-2000)`, `float64(1e-2000)`, `float64`, `0`},
+ {`package e3; const _ = float64(-1e-2000)`, `float64(-1e-2000)`, `float64`, `0`},
+ {`package e4; const _ = complex64( 1e-200)`, `complex64(1e-200)`, `complex64`, `0`},
+ {`package e5; const _ = complex64(-1e-200)`, `complex64(-1e-200)`, `complex64`, `0`},
+ {`package e6; const _ = complex128( 1e-2000)`, `complex128(1e-2000)`, `complex128`, `0`},
+ {`package e7; const _ = complex128(-1e-2000)`, `complex128(-1e-2000)`, `complex128`, `0`},
+
+ {`package f0 ; var _ float32 = 1e-200`, `1e-200`, `float32`, `0`},
+ {`package f1 ; var _ float32 = -1e-200`, `-1e-200`, `float32`, `0`},
+ {`package f2a; var _ float64 = 1e-2000`, `1e-2000`, `float64`, `0`},
+ {`package f3a; var _ float64 = -1e-2000`, `-1e-2000`, `float64`, `0`},
+ {`package f2b; var _ = 1e-2000`, `1e-2000`, `float64`, `0`},
+ {`package f3b; var _ = -1e-2000`, `-1e-2000`, `float64`, `0`},
+ {`package f4 ; var _ complex64 = 1e-200 `, `1e-200`, `complex64`, `0`},
+ {`package f5 ; var _ complex64 = -1e-200 `, `-1e-200`, `complex64`, `0`},
+ {`package f6a; var _ complex128 = 1e-2000i`, `1e-2000i`, `complex128`, `0`},
+ {`package f7a; var _ complex128 = -1e-2000i`, `-1e-2000i`, `complex128`, `0`},
+ {`package f6b; var _ = 1e-2000i`, `1e-2000i`, `complex128`, `0`},
+ {`package f7b; var _ = -1e-2000i`, `-1e-2000i`, `complex128`, `0`},
+ }
+
+ for _, test := range tests {
+ info := Info{
+ Types: make(map[ast.Expr]TypeAndValue),
+ }
+ name := mustTypecheck(t, "ValuesInfo", test.src, &info)
+
+ // look for constant expression
+ var expr ast.Expr
+ for e := range info.Types {
+ if ExprString(e) == test.expr {
+ expr = e
+ break
+ }
+ }
+ if expr == nil {
+ t.Errorf("package %s: no expression found for %s", name, test.expr)
+ continue
+ }
+ tv := info.Types[expr]
+
+ // check that type is correct
+ if got := tv.Type.String(); got != test.typ {
+ t.Errorf("package %s: got type %s; want %s", name, got, test.typ)
+ continue
+ }
+
+ // check that value is correct
+ if got := tv.Value.String(); got != test.val {
+ t.Errorf("package %s: got value %s; want %s", name, got, test.val)
+ }
+ }
+}
+
+func TestTypesInfo(t *testing.T) {
+ var tests = []struct {
+ src string
+ expr string // expression
+ typ string // value type
+ }{
+ // single-valued expressions of untyped constants
+ {`package b0; var x interface{} = false`, `false`, `bool`},
+ {`package b1; var x interface{} = 0`, `0`, `int`},
+ {`package b2; var x interface{} = 0.`, `0.`, `float64`},
+ {`package b3; var x interface{} = 0i`, `0i`, `complex128`},
+ {`package b4; var x interface{} = "foo"`, `"foo"`, `string`},
+
+ // comma-ok expressions
+ {`package p0; var x interface{}; var _, _ = x.(int)`,
+ `x.(int)`,
+ `(int, bool)`,
+ },
+ {`package p1; var x interface{}; func _() { _, _ = x.(int) }`,
+ `x.(int)`,
+ `(int, bool)`,
+ },
+ // TODO(gri): uncomment if we accept issue 8189.
+ // {`package p2; type mybool bool; var m map[string]complex128; var b mybool; func _() { _, b = m["foo"] }`,
+ // `m["foo"]`,
+ // `(complex128, p2.mybool)`,
+ // },
+ // TODO(gri): remove if we accept issue 8189.
+ {`package p2; var m map[string]complex128; var b bool; func _() { _, b = m["foo"] }`,
+ `m["foo"]`,
+ `(complex128, bool)`,
+ },
+ {`package p3; var c chan string; var _, _ = <-c`,
+ `<-c`,
+ `(string, bool)`,
+ },
+
+ // issue 6796
+ {`package issue6796_a; var x interface{}; var _, _ = (x.(int))`,
+ `x.(int)`,
+ `(int, bool)`,
+ },
+ {`package issue6796_b; var c chan string; var _, _ = (<-c)`,
+ `(<-c)`,
+ `(string, bool)`,
+ },
+ {`package issue6796_c; var c chan string; var _, _ = (<-c)`,
+ `<-c`,
+ `(string, bool)`,
+ },
+ {`package issue6796_d; var c chan string; var _, _ = ((<-c))`,
+ `(<-c)`,
+ `(string, bool)`,
+ },
+ {`package issue6796_e; func f(c chan string) { _, _ = ((<-c)) }`,
+ `(<-c)`,
+ `(string, bool)`,
+ },
+
+ // issue 7060
+ {`package issue7060_a; var ( m map[int]string; x, ok = m[0] )`,
+ `m[0]`,
+ `(string, bool)`,
+ },
+ {`package issue7060_b; var ( m map[int]string; x, ok interface{} = m[0] )`,
+ `m[0]`,
+ `(string, bool)`,
+ },
+ {`package issue7060_c; func f(x interface{}, ok bool, m map[int]string) { x, ok = m[0] }`,
+ `m[0]`,
+ `(string, bool)`,
+ },
+ {`package issue7060_d; var ( ch chan string; x, ok = <-ch )`,
+ `<-ch`,
+ `(string, bool)`,
+ },
+ {`package issue7060_e; var ( ch chan string; x, ok interface{} = <-ch )`,
+ `<-ch`,
+ `(string, bool)`,
+ },
+ {`package issue7060_f; func f(x interface{}, ok bool, ch chan string) { x, ok = <-ch }`,
+ `<-ch`,
+ `(string, bool)`,
+ },
+ }
+
+ for _, test := range tests {
+ info := Info{Types: make(map[ast.Expr]TypeAndValue)}
+ name := mustTypecheck(t, "TypesInfo", test.src, &info)
+
+ // look for expression type
+ var typ Type
+ for e, tv := range info.Types {
+ if ExprString(e) == test.expr {
+ typ = tv.Type
+ break
+ }
+ }
+ if typ == nil {
+ t.Errorf("package %s: no type found for %s", name, test.expr)
+ continue
+ }
+
+ // check that type is correct
+ if got := typ.String(); got != test.typ {
+ t.Errorf("package %s: got %s; want %s", name, got, test.typ)
+ }
+ }
+}
+
+func predString(tv TypeAndValue) string {
+ var buf bytes.Buffer
+ pred := func(b bool, s string) {
+ if b {
+ if buf.Len() > 0 {
+ buf.WriteString(", ")
+ }
+ buf.WriteString(s)
+ }
+ }
+
+ pred(tv.IsVoid(), "void")
+ pred(tv.IsType(), "type")
+ pred(tv.IsBuiltin(), "builtin")
+ pred(tv.IsValue() && tv.Value != nil, "const")
+ pred(tv.IsValue() && tv.Value == nil, "value")
+ pred(tv.IsNil(), "nil")
+ pred(tv.Addressable(), "addressable")
+ pred(tv.Assignable(), "assignable")
+ pred(tv.HasOk(), "hasOk")
+
+ if buf.Len() == 0 {
+ return "invalid"
+ }
+ return buf.String()
+}
+
+func TestPredicatesInfo(t *testing.T) {
+ var tests = []struct {
+ src string
+ expr string
+ pred string
+ }{
+ // void
+ {`package n0; func f() { f() }`, `f()`, `void`},
+
+ // types
+ {`package t0; type _ int`, `int`, `type`},
+ {`package t1; type _ []int`, `[]int`, `type`},
+ {`package t2; type _ func()`, `func()`, `type`},
+
+ // built-ins
+ {`package b0; var _ = len("")`, `len`, `builtin`},
+ {`package b1; var _ = (len)("")`, `(len)`, `builtin`},
+
+ // constants
+ {`package c0; var _ = 42`, `42`, `const`},
+ {`package c1; var _ = "foo" + "bar"`, `"foo" + "bar"`, `const`},
+ {`package c2; const (i = 1i; _ = i)`, `i`, `const`},
+
+ // values
+ {`package v0; var (a, b int; _ = a + b)`, `a + b`, `value`},
+ {`package v1; var _ = &[]int{1}`, `([]int literal)`, `value`},
+ {`package v2; var _ = func(){}`, `(func() literal)`, `value`},
+ {`package v4; func f() { _ = f }`, `f`, `value`},
+ {`package v3; var _ *int = nil`, `nil`, `value, nil`},
+ {`package v3; var _ *int = (nil)`, `(nil)`, `value, nil`},
+
+ // addressable (and thus assignable) operands
+ {`package a0; var (x int; _ = x)`, `x`, `value, addressable, assignable`},
+ {`package a1; var (p *int; _ = *p)`, `*p`, `value, addressable, assignable`},
+ {`package a2; var (s []int; _ = s[0])`, `s[0]`, `value, addressable, assignable`},
+ {`package a3; var (s struct{f int}; _ = s.f)`, `s.f`, `value, addressable, assignable`},
+ {`package a4; var (a [10]int; _ = a[0])`, `a[0]`, `value, addressable, assignable`},
+ {`package a5; func _(x int) { _ = x }`, `x`, `value, addressable, assignable`},
+ {`package a6; func _()(x int) { _ = x; return }`, `x`, `value, addressable, assignable`},
+ {`package a7; type T int; func (x T) _() { _ = x }`, `x`, `value, addressable, assignable`},
+ // composite literals are not addressable
+
+ // assignable but not addressable values
+ {`package s0; var (m map[int]int; _ = m[0])`, `m[0]`, `value, assignable, hasOk`},
+ {`package s1; var (m map[int]int; _, _ = m[0])`, `m[0]`, `value, assignable, hasOk`},
+
+ // hasOk expressions
+ {`package k0; var (ch chan int; _ = <-ch)`, `<-ch`, `value, hasOk`},
+ {`package k1; var (ch chan int; _, _ = <-ch)`, `<-ch`, `value, hasOk`},
+
+ // missing entries
+ // - package names are collected in the Uses map
+ // - identifiers being declared are collected in the Defs map
+ {`package m0; import "os"; func _() { _ = os.Stdout }`, `os`, ``},
+ {`package m1; import p "os"; func _() { _ = p.Stdout }`, `p`, ``},
+ {`package m2; const c = 0`, `c`, ``},
+ {`package m3; type T int`, `T`, ``},
+ {`package m4; var v int`, `v`, ``},
+ {`package m5; func f() {}`, `f`, ``},
+ {`package m6; func _(x int) {}`, `x`, ``},
+ {`package m6; func _()(x int) { return }`, `x`, ``},
+ {`package m6; type T int; func (x T) _() {}`, `x`, ``},
+ }
+
+ for _, test := range tests {
+ info := Info{Types: make(map[ast.Expr]TypeAndValue)}
+ name := mustTypecheck(t, "PredicatesInfo", test.src, &info)
+
+ // look for expression predicates
+ got := ""
+ for e, tv := range info.Types {
+ //println(name, ExprString(e))
+ if ExprString(e) == test.expr {
+ got = predString(tv)
+ break
+ }
+ }
+
+ if got != test.pred {
+ t.Errorf("package %s: got %s; want %s", name, got, test.pred)
+ }
+ }
+}
+
+func TestScopesInfo(t *testing.T) {
+ var tests = []struct {
+ src string
+ scopes []string // list of scope descriptors of the form kind:varlist
+ }{
+ {`package p0`, []string{
+ "file:",
+ }},
+ {`package p1; import ( "fmt"; m "math"; _ "os" ); var ( _ = fmt.Println; _ = m.Pi )`, []string{
+ "file:fmt m",
+ }},
+ {`package p2; func _() {}`, []string{
+ "file:", "func:",
+ }},
+ {`package p3; func _(x, y int) {}`, []string{
+ "file:", "func:x y",
+ }},
+ {`package p4; func _(x, y int) { x, z := 1, 2; _ = z }`, []string{
+ "file:", "func:x y z", // redeclaration of x
+ }},
+ {`package p5; func _(x, y int) (u, _ int) { return }`, []string{
+ "file:", "func:u x y",
+ }},
+ {`package p6; func _() { { var x int; _ = x } }`, []string{
+ "file:", "func:", "block:x",
+ }},
+ {`package p7; func _() { if true {} }`, []string{
+ "file:", "func:", "if:", "block:",
+ }},
+ {`package p8; func _() { if x := 0; x < 0 { y := x; _ = y } }`, []string{
+ "file:", "func:", "if:x", "block:y",
+ }},
+ {`package p9; func _() { switch x := 0; x {} }`, []string{
+ "file:", "func:", "switch:x",
+ }},
+ {`package p10; func _() { switch x := 0; x { case 1: y := x; _ = y; default: }}`, []string{
+ "file:", "func:", "switch:x", "case:y", "case:",
+ }},
+ {`package p11; func _(t interface{}) { switch t.(type) {} }`, []string{
+ "file:", "func:t", "type switch:",
+ }},
+ {`package p12; func _(t interface{}) { switch t := t; t.(type) {} }`, []string{
+ "file:", "func:t", "type switch:t",
+ }},
+ {`package p13; func _(t interface{}) { switch x := t.(type) { case int: _ = x } }`, []string{
+ "file:", "func:t", "type switch:", "case:x", // x implicitly declared
+ }},
+ {`package p14; func _() { select{} }`, []string{
+ "file:", "func:",
+ }},
+ {`package p15; func _(c chan int) { select{ case <-c: } }`, []string{
+ "file:", "func:c", "comm:",
+ }},
+ {`package p16; func _(c chan int) { select{ case i := <-c: x := i; _ = x} }`, []string{
+ "file:", "func:c", "comm:i x",
+ }},
+ {`package p17; func _() { for{} }`, []string{
+ "file:", "func:", "for:", "block:",
+ }},
+ {`package p18; func _(n int) { for i := 0; i < n; i++ { _ = i } }`, []string{
+ "file:", "func:n", "for:i", "block:",
+ }},
+ {`package p19; func _(a []int) { for i := range a { _ = i} }`, []string{
+ "file:", "func:a", "range:i", "block:",
+ }},
+ {`package p20; var s int; func _(a []int) { for i, x := range a { s += x; _ = i } }`, []string{
+ "file:", "func:a", "range:i x", "block:",
+ }},
+ }
+
+ for _, test := range tests {
+ info := Info{Scopes: make(map[ast.Node]*Scope)}
+ name := mustTypecheck(t, "ScopesInfo", test.src, &info)
+
+ // number of scopes must match
+ if len(info.Scopes) != len(test.scopes) {
+ t.Errorf("package %s: got %d scopes; want %d", name, len(info.Scopes), len(test.scopes))
+ }
+
+ // scope descriptions must match
+ for node, scope := range info.Scopes {
+ kind := ""
+ switch node.(type) {
+ case *ast.File:
+ kind = "file"
+ case *ast.FuncType:
+ kind = "func"
+ case *ast.BlockStmt:
+ kind = "block"
+ case *ast.IfStmt:
+ kind = "if"
+ case *ast.SwitchStmt:
+ kind = "switch"
+ case *ast.TypeSwitchStmt:
+ kind = "type switch"
+ case *ast.CaseClause:
+ kind = "case"
+ case *ast.CommClause:
+ kind = "comm"
+ case *ast.ForStmt:
+ kind = "for"
+ case *ast.RangeStmt:
+ kind = "range"
+ }
+
+ // look for matching scope description
+ desc := kind + ":" + strings.Join(scope.Names(), " ")
+ found := false
+ for _, d := range test.scopes {
+ if desc == d {
+ found = true
+ break
+ }
+ }
+ if !found {
+ t.Errorf("package %s: no matching scope found for %s", name, desc)
+ }
+ }
+ }
+}
+
+func TestInitOrderInfo(t *testing.T) {
+ var tests = []struct {
+ src string
+ inits []string
+ }{
+ {`package p0; var (x = 1; y = x)`, []string{
+ "x = 1", "y = x",
+ }},
+ {`package p1; var (a = 1; b = 2; c = 3)`, []string{
+ "a = 1", "b = 2", "c = 3",
+ }},
+ {`package p2; var (a, b, c = 1, 2, 3)`, []string{
+ "a = 1", "b = 2", "c = 3",
+ }},
+ {`package p3; var _ = f(); func f() int { return 1 }`, []string{
+ "_ = f()", // blank var
+ }},
+ {`package p4; var (a = 0; x = y; y = z; z = 0)`, []string{
+ "a = 0", "z = 0", "y = z", "x = y",
+ }},
+ {`package p5; var (a, _ = m[0]; m map[int]string)`, []string{
+ "a, _ = m[0]", // blank var
+ }},
+ {`package p6; var a, b = f(); func f() (_, _ int) { return z, z }; var z = 0`, []string{
+ "z = 0", "a, b = f()",
+ }},
+ {`package p7; var (a = func() int { return b }(); b = 1)`, []string{
+ "b = 1", "a = (func() int literal)()",
+ }},
+ {`package p8; var (a, b = func() (_, _ int) { return c, c }(); c = 1)`, []string{
+ "c = 1", "a, b = (func() (_, _ int) literal)()",
+ }},
+ {`package p9; type T struct{}; func (T) m() int { _ = y; return 0 }; var x, y = T.m, 1`, []string{
+ "y = 1", "x = T.m",
+ }},
+ {`package p10; var (d = c + b; a = 0; b = 0; c = 0)`, []string{
+ "a = 0", "b = 0", "c = 0", "d = c + b",
+ }},
+ {`package p11; var (a = e + c; b = d + c; c = 0; d = 0; e = 0)`, []string{
+ "c = 0", "d = 0", "b = d + c", "e = 0", "a = e + c",
+ }},
+ // emit an initializer for n:1 initializations only once (not for each node
+ // on the lhs which may appear in different order in the dependency graph)
+ {`package p12; var (a = x; b = 0; x, y = m[0]; m map[int]int)`, []string{
+ "b = 0", "x, y = m[0]", "a = x",
+ }},
+ // test case from spec section on package initialization
+ {`package p12
+
+ var (
+ a = c + b
+ b = f()
+ c = f()
+ d = 3
+ )
+
+ func f() int {
+ d++
+ return d
+ }`, []string{
+ "d = 3", "b = f()", "c = f()", "a = c + b",
+ }},
+ // test case for issue 7131
+ {`package main
+
+ var counter int
+ func next() int { counter++; return counter }
+
+ var _ = makeOrder()
+ func makeOrder() []int { return []int{f, b, d, e, c, a} }
+
+ var a = next()
+ var b, c = next(), next()
+ var d, e, f = next(), next(), next()
+ `, []string{
+ "a = next()", "b = next()", "c = next()", "d = next()", "e = next()", "f = next()", "_ = makeOrder()",
+ }},
+ }
+
+ for _, test := range tests {
+ info := Info{}
+ name := mustTypecheck(t, "InitOrderInfo", test.src, &info)
+
+ // number of initializers must match
+ if len(info.InitOrder) != len(test.inits) {
+ t.Errorf("package %s: got %d initializers; want %d", name, len(info.InitOrder), len(test.inits))
+ continue
+ }
+
+ // initializers must match
+ for i, want := range test.inits {
+ got := info.InitOrder[i].String()
+ if got != want {
+ t.Errorf("package %s, init %d: got %s; want %s", name, i, got, want)
+ continue
+ }
+ }
+ }
+}
+
+func TestMultiFileInitOrder(t *testing.T) {
+ fset := token.NewFileSet()
+ mustParse := func(src string) *ast.File {
+ f, err := parser.ParseFile(fset, "main", src, 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+ return f
+ }
+
+ fileA := mustParse(`package main; var a = 1`)
+ fileB := mustParse(`package main; var b = 2`)
+
+ // The initialization order must not depend on the parse
+ // order of the files, only on the presentation order to
+ // the type-checker.
+ for _, test := range []struct {
+ files []*ast.File
+ want string
+ }{
+ {[]*ast.File{fileA, fileB}, "[a = 1 b = 2]"},
+ {[]*ast.File{fileB, fileA}, "[b = 2 a = 1]"},
+ } {
+ var info Info
+ if _, err := new(Config).Check("main", fset, test.files, &info); err != nil {
+ t.Fatal(err)
+ }
+ if got := fmt.Sprint(info.InitOrder); got != test.want {
+ t.Fatalf("got %s; want %s", got, test.want)
+ }
+ }
+}
+
+func TestFiles(t *testing.T) {
+ var sources = []string{
+ "package p; type T struct{}; func (T) m1() {}",
+ "package p; func (T) m2() {}; var x interface{ m1(); m2() } = T{}",
+ "package p; func (T) m3() {}; var y interface{ m1(); m2(); m3() } = T{}",
+ "package p",
+ }
+
+ var conf Config
+ fset := token.NewFileSet()
+ pkg := NewPackage("p", "p")
+ var info Info
+ check := NewChecker(&conf, fset, pkg, &info)
+
+ for i, src := range sources {
+ filename := fmt.Sprintf("sources%d", i)
+ f, err := parser.ParseFile(fset, filename, src, 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := check.Files([]*ast.File{f}); err != nil {
+ t.Error(err)
+ }
+ }
+
+ // check InitOrder is [x y]
+ var vars []string
+ for _, init := range info.InitOrder {
+ for _, v := range init.Lhs {
+ vars = append(vars, v.Name())
+ }
+ }
+ if got, want := fmt.Sprint(vars), "[x y]"; got != want {
+ t.Errorf("InitOrder == %s, want %s", got, want)
+ }
+}
+
+func TestSelection(t *testing.T) {
+ selections := make(map[*ast.SelectorExpr]*Selection)
+
+ fset := token.NewFileSet()
+ conf := Config{
+ Packages: make(map[string]*Package),
+ Import: func(imports map[string]*Package, path string) (*Package, error) {
+ return imports[path], nil
+ },
+ }
+ makePkg := func(path, src string) {
+ f, err := parser.ParseFile(fset, path+".go", src, 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+ pkg, err := conf.Check(path, fset, []*ast.File{f}, &Info{Selections: selections})
+ if err != nil {
+ t.Fatal(err)
+ }
+ conf.Packages[path] = pkg
+ }
+
+ const libSrc = `
+package lib
+type T float64
+const C T = 3
+var V T
+func F() {}
+func (T) M() {}
+`
+ const mainSrc = `
+package main
+import "lib"
+
+type A struct {
+ *B
+ C
+}
+
+type B struct {
+ b int
+}
+
+func (B) f(int)
+
+type C struct {
+ c int
+}
+
+func (C) g()
+func (*C) h()
+
+func main() {
+ // qualified identifiers
+ var _ lib.T
+ _ = lib.C
+ _ = lib.F
+ _ = lib.V
+ _ = lib.T.M
+
+ // fields
+ _ = A{}.B
+ _ = new(A).B
+
+ _ = A{}.C
+ _ = new(A).C
+
+ _ = A{}.b
+ _ = new(A).b
+
+ _ = A{}.c
+ _ = new(A).c
+
+ // methods
+ _ = A{}.f
+ _ = new(A).f
+ _ = A{}.g
+ _ = new(A).g
+ _ = new(A).h
+
+ _ = B{}.f
+ _ = new(B).f
+
+ _ = C{}.g
+ _ = new(C).g
+ _ = new(C).h
+
+ // method expressions
+ _ = A.f
+ _ = (*A).f
+ _ = B.f
+ _ = (*B).f
+}`
+
+ wantOut := map[string][2]string{
+ "lib.T.M": {"method expr (lib.T) M(lib.T)", ".[0]"},
+
+ "A{}.B": {"field (main.A) B *main.B", ".[0]"},
+ "new(A).B": {"field (*main.A) B *main.B", "->[0]"},
+ "A{}.C": {"field (main.A) C main.C", ".[1]"},
+ "new(A).C": {"field (*main.A) C main.C", "->[1]"},
+ "A{}.b": {"field (main.A) b int", "->[0 0]"},
+ "new(A).b": {"field (*main.A) b int", "->[0 0]"},
+ "A{}.c": {"field (main.A) c int", ".[1 0]"},
+ "new(A).c": {"field (*main.A) c int", "->[1 0]"},
+
+ "A{}.f": {"method (main.A) f(int)", "->[0 0]"},
+ "new(A).f": {"method (*main.A) f(int)", "->[0 0]"},
+ "A{}.g": {"method (main.A) g()", ".[1 0]"},
+ "new(A).g": {"method (*main.A) g()", "->[1 0]"},
+ "new(A).h": {"method (*main.A) h()", "->[1 1]"}, // TODO(gri) should this report .[1 1] ?
+ "B{}.f": {"method (main.B) f(int)", ".[0]"},
+ "new(B).f": {"method (*main.B) f(int)", "->[0]"},
+ "C{}.g": {"method (main.C) g()", ".[0]"},
+ "new(C).g": {"method (*main.C) g()", "->[0]"},
+ "new(C).h": {"method (*main.C) h()", "->[1]"}, // TODO(gri) should this report .[1] ?
+
+ "A.f": {"method expr (main.A) f(main.A, int)", "->[0 0]"},
+ "(*A).f": {"method expr (*main.A) f(*main.A, int)", "->[0 0]"},
+ "B.f": {"method expr (main.B) f(main.B, int)", ".[0]"},
+ "(*B).f": {"method expr (*main.B) f(*main.B, int)", "->[0]"},
+ }
+
+ makePkg("lib", libSrc)
+ makePkg("main", mainSrc)
+
+ for e, sel := range selections {
+ sel.String() // assertion: must not panic
+
+ start := fset.Position(e.Pos()).Offset
+ end := fset.Position(e.End()).Offset
+ syntax := mainSrc[start:end] // (all SelectorExprs are in main, not lib)
+
+ direct := "."
+ if sel.Indirect() {
+ direct = "->"
+ }
+ got := [2]string{
+ sel.String(),
+ fmt.Sprintf("%s%v", direct, sel.Index()),
+ }
+ want := wantOut[syntax]
+ if want != got {
+ t.Errorf("%s: got %q; want %q", syntax, got, want)
+ }
+ delete(wantOut, syntax)
+
+ // We must explicitly assert properties of the
+ // Signature's receiver since it doesn't participate
+ // in Identical() or String().
+ sig, _ := sel.Type().(*Signature)
+ if sel.Kind() == MethodVal {
+ got := sig.Recv().Type()
+ want := sel.Recv()
+ if !Identical(got, want) {
+ t.Errorf("%s: Recv() = %s, want %s", got, want)
+ }
+ } else if sig != nil && sig.Recv() != nil {
+ t.Error("%s: signature has receiver %s", sig, sig.Recv().Type())
+ }
+ }
+ // Assert that all wantOut entries were used exactly once.
+ for syntax := range wantOut {
+ t.Errorf("no ast.Selection found with syntax %q", syntax)
+ }
+}
+
+func TestIssue8518(t *testing.T) {
+ fset := token.NewFileSet()
+ conf := Config{
+ Packages: make(map[string]*Package),
+ Error: func(err error) { t.Log(err) }, // don't exit after first error
+ Import: func(imports map[string]*Package, path string) (*Package, error) {
+ return imports[path], nil
+ },
+ }
+ makePkg := func(path, src string) {
+ f, err := parser.ParseFile(fset, path, src, 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+ pkg, _ := conf.Check(path, fset, []*ast.File{f}, nil) // errors logged via conf.Error
+ conf.Packages[path] = pkg
+ }
+
+ const libSrc = `
+package a
+import "missing"
+const C1 = foo
+const C2 = missing.C
+`
+
+ const mainSrc = `
+package main
+import "a"
+var _ = a.C1
+var _ = a.C2
+`
+
+ makePkg("a", libSrc)
+ makePkg("main", mainSrc) // don't crash when type-checking this package
+}
+
+func TestLookupFieldOrMethod(t *testing.T) {
+ // Test cases assume a lookup of the form a.f or x.f, where a stands for an
+ // addressable value, and x for a non-addressable value (x is declared as a
+ // variable only for ease of writing the test cases).
+ var tests = []struct {
+ src string
+ found bool
+ index []int
+ indirect bool
+ }{
+ // field lookups
+ {"var x T; type T struct{}", false, nil, false},
+ {"var x T; type T struct{ f int }", true, []int{0}, false},
+ {"var x T; type T struct{ a, b, f, c int }", true, []int{2}, false},
+
+ // method lookups
+ {"var a T; type T struct{}; func (T) f() {}", true, []int{0}, false},
+ {"var a *T; type T struct{}; func (T) f() {}", true, []int{0}, true},
+ {"var a T; type T struct{}; func (*T) f() {}", true, []int{0}, false},
+ {"var a *T; type T struct{}; func (*T) f() {}", true, []int{0}, true}, // TODO(gri) should this report indirect = false?
+
+ // collisions
+ {"type ( E1 struct{ f int }; E2 struct{ f int }; x struct{ E1; *E2 })", false, []int{1, 0}, false},
+ {"type ( E1 struct{ f int }; E2 struct{}; x struct{ E1; *E2 }); func (E2) f() {}", false, []int{1, 0}, false},
+
+ // outside methodset
+ // (*T).f method exists, but value of type T is not addressable
+ {"var x T; type T struct{}; func (*T) f() {}", false, nil, true},
+ }
+
+ for _, test := range tests {
+ pkg, err := pkgFor("test", "package p;"+test.src, nil)
+ if err != nil {
+ t.Errorf("%s: incorrect test case: %s", test.src, err)
+ continue
+ }
+
+ obj := pkg.Scope().Lookup("a")
+ if obj == nil {
+ if obj = pkg.Scope().Lookup("x"); obj == nil {
+ t.Errorf("%s: incorrect test case - no object a or x", test.src)
+ continue
+ }
+ }
+
+ f, index, indirect := LookupFieldOrMethod(obj.Type(), obj.Name() == "a", pkg, "f")
+ if (f != nil) != test.found {
+ if f == nil {
+ t.Errorf("%s: got no object; want one", test.src)
+ } else {
+ t.Errorf("%s: got object = %v; want none", test.src, f)
+ }
+ }
+ if !sameSlice(index, test.index) {
+ t.Errorf("%s: got index = %v; want %v", test.src, index, test.index)
+ }
+ if indirect != test.indirect {
+ t.Errorf("%s: got indirect = %v; want %v", test.src, indirect, test.indirect)
+ }
+ }
+}
+
+func sameSlice(a, b []int) bool {
+ if len(a) != len(b) {
+ return false
+ }
+ for i, x := range a {
+ if x != b[i] {
+ return false
+ }
+ }
+ return true
+}
diff --git a/llgo/third_party/go.tools/go/types/assignments.go b/llgo/third_party/go.tools/go/types/assignments.go
new file mode 100644
index 0000000000000000000000000000000000000000..7ee1abcc9892c2c1fea17013dfe023805593f292
--- /dev/null
+++ b/llgo/third_party/go.tools/go/types/assignments.go
@@ -0,0 +1,323 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements initialization and assignment checks.
+
+package types
+
+import (
+ "go/ast"
+ "go/token"
+)
+
+// assignment reports whether x can be assigned to a variable of type T,
+// if necessary by attempting to convert untyped values to the appropriate
+// type. If x.mode == invalid upon return, then assignment has already
+// issued an error message and the caller doesn't have to report another.
+// Use T == nil to indicate assignment to an untyped blank identifier.
+//
+// TODO(gri) Should find a better way to handle in-band errors.
+//
+func (check *Checker) assignment(x *operand, T Type) bool {
+ switch x.mode {
+ case invalid:
+ return true // error reported before
+ case constant, variable, mapindex, value, commaok:
+ // ok
+ default:
+ unreachable()
+ }
+
+ // x must be a single value
+ // (tuple types are never named - no need for underlying type)
+ if t, _ := x.typ.(*Tuple); t != nil {
+ assert(t.Len() > 1)
+ check.errorf(x.pos(), "%d-valued expression %s used as single value", t.Len(), x)
+ x.mode = invalid
+ return false
+ }
+
+ if isUntyped(x.typ) {
+ target := T
+ // spec: "If an untyped constant is assigned to a variable of interface
+ // type or the blank identifier, the constant is first converted to type
+ // bool, rune, int, float64, complex128 or string respectively, depending
+ // on whether the value is a boolean, rune, integer, floating-point, complex,
+ // or string constant."
+ if T == nil || isInterface(T) {
+ if T == nil && x.typ == Typ[UntypedNil] {
+ check.errorf(x.pos(), "use of untyped nil")
+ x.mode = invalid
+ return false
+ }
+ target = defaultType(x.typ)
+ }
+ check.convertUntyped(x, target)
+ if x.mode == invalid {
+ return false
+ }
+ }
+
+ // spec: "If a left-hand side is the blank identifier, any typed or
+ // non-constant value except for the predeclared identifier nil may
+ // be assigned to it."
+ return T == nil || x.assignableTo(check.conf, T)
+}
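+
+// For illustration, a few hypothetical assignments exercising the rules above:
+//
+//  var i interface{} = 1 // untyped constant 1 is first converted to its default type int
+//  var f float64 = 1     // the same constant is converted to float64 instead
+//  _ = nil               // error: use of untyped nil (T == nil and the operand is untyped nil)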
+
+func (check *Checker) initConst(lhs *Const, x *operand) {
+ if x.mode == invalid || x.typ == Typ[Invalid] || lhs.typ == Typ[Invalid] {
+ if lhs.typ == nil {
+ lhs.typ = Typ[Invalid]
+ }
+ return
+ }
+
+ // rhs must be a constant
+ if x.mode != constant {
+ check.errorf(x.pos(), "%s is not constant", x)
+ if lhs.typ == nil {
+ lhs.typ = Typ[Invalid]
+ }
+ return
+ }
+ assert(isConstType(x.typ))
+
+ // If the lhs doesn't have a type yet, use the type of x.
+ if lhs.typ == nil {
+ lhs.typ = x.typ
+ }
+
+ if !check.assignment(x, lhs.typ) {
+ if x.mode != invalid {
+ check.errorf(x.pos(), "cannot define constant %s (type %s) as %s", lhs.Name(), lhs.typ, x)
+ }
+ return
+ }
+
+ lhs.val = x.val
+}
+
+// If result is set, lhs is a function result parameter and x is a return result.
+func (check *Checker) initVar(lhs *Var, x *operand, result bool) Type {
+ if x.mode == invalid || x.typ == Typ[Invalid] || lhs.typ == Typ[Invalid] {
+ if lhs.typ == nil {
+ lhs.typ = Typ[Invalid]
+ }
+ return nil
+ }
+
+ // If the lhs doesn't have a type yet, use the type of x.
+ if lhs.typ == nil {
+ typ := x.typ
+ if isUntyped(typ) {
+ // convert untyped types to default types
+ if typ == Typ[UntypedNil] {
+ check.errorf(x.pos(), "use of untyped nil")
+ lhs.typ = Typ[Invalid]
+ return nil
+ }
+ typ = defaultType(typ)
+ }
+ lhs.typ = typ
+ }
+
+ if !check.assignment(x, lhs.typ) {
+ if x.mode != invalid {
+ if result {
+ // don't refer to lhs.name because it may be an anonymous result parameter
+ check.errorf(x.pos(), "cannot return %s as value of type %s", x, lhs.typ)
+ } else {
+ check.errorf(x.pos(), "cannot initialize %s with %s", lhs, x)
+ }
+ }
+ return nil
+ }
+
+ return x.typ
+}
+
+func (check *Checker) assignVar(lhs ast.Expr, x *operand) Type {
+ if x.mode == invalid || x.typ == Typ[Invalid] {
+ return nil
+ }
+
+ // Determine if the lhs is a (possibly parenthesized) identifier.
+ ident, _ := unparen(lhs).(*ast.Ident)
+
+ // Don't evaluate lhs if it is the blank identifier.
+ if ident != nil && ident.Name == "_" {
+ check.recordDef(ident, nil)
+ if !check.assignment(x, nil) {
+ assert(x.mode == invalid)
+ x.typ = nil
+ }
+ return x.typ
+ }
+
+ // If the lhs is an identifier denoting a variable v, this assignment
+ // is not a 'use' of v. Remember current value of v.used and restore
+ // after evaluating the lhs via check.expr.
+ var v *Var
+ var v_used bool
+ if ident != nil {
+ if _, obj := check.scope.LookupParent(ident.Name); obj != nil {
+ v, _ = obj.(*Var)
+ if v != nil {
+ v_used = v.used
+ }
+ }
+ }
+
+ var z operand
+ check.expr(&z, lhs)
+ if v != nil {
+ v.used = v_used // restore v.used
+ }
+
+ if z.mode == invalid || z.typ == Typ[Invalid] {
+ return nil
+ }
+
+ // spec: "Each left-hand side operand must be addressable, a map index
+ // expression, or the blank identifier. Operands may be parenthesized."
+ switch z.mode {
+ case invalid:
+ return nil
+ case variable, mapindex:
+ // ok
+ default:
+ check.errorf(z.pos(), "cannot assign to %s", &z)
+ return nil
+ }
+
+ if !check.assignment(x, z.typ) {
+ if x.mode != invalid {
+ check.errorf(x.pos(), "cannot assign %s to %s", x, &z)
+ }
+ return nil
+ }
+
+ return x.typ
+}
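+
+// For illustration, hypothetical left-hand sides corresponding to the modes above:
+//
+//  x = 1    // variable
+//  m[k] = 1 // map index expression
+//  _ = 1    // blank identifier (handled earlier, without evaluating the lhs)
+//
+// Any other lhs (for example a function call result) is rejected as not assignable.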
+
+// If returnPos is valid, initVars is called to type-check the assignment of
+// return expressions, and returnPos is the position of the return statement.
+func (check *Checker) initVars(lhs []*Var, rhs []ast.Expr, returnPos token.Pos) {
+ l := len(lhs)
+ get, r, commaOk := unpack(func(x *operand, i int) { check.expr(x, rhs[i]) }, len(rhs), l == 2 && !returnPos.IsValid())
+ if get == nil || l != r {
+ // invalidate lhs and use rhs
+ for _, obj := range lhs {
+ if obj.typ == nil {
+ obj.typ = Typ[Invalid]
+ }
+ }
+ if get == nil {
+ return // error reported by unpack
+ }
+ check.useGetter(get, r)
+ if returnPos.IsValid() {
+ check.errorf(returnPos, "wrong number of return values (want %d, got %d)", l, r)
+ return
+ }
+ check.errorf(rhs[0].Pos(), "assignment count mismatch (%d vs %d)", l, r)
+ return
+ }
+
+ var x operand
+ if commaOk {
+ var a [2]Type
+ for i := range a {
+ get(&x, i)
+ a[i] = check.initVar(lhs[i], &x, returnPos.IsValid())
+ }
+ check.recordCommaOkTypes(rhs[0], a)
+ return
+ }
+
+ for i, lhs := range lhs {
+ get(&x, i)
+ check.initVar(lhs, &x, returnPos.IsValid())
+ }
+}
+
+func (check *Checker) assignVars(lhs, rhs []ast.Expr) {
+ l := len(lhs)
+ get, r, commaOk := unpack(func(x *operand, i int) { check.expr(x, rhs[i]) }, len(rhs), l == 2)
+ if get == nil {
+ return // error reported by unpack
+ }
+ if l != r {
+ check.useGetter(get, r)
+ check.errorf(rhs[0].Pos(), "assignment count mismatch (%d vs %d)", l, r)
+ return
+ }
+
+ var x operand
+ if commaOk {
+ var a [2]Type
+ for i := range a {
+ get(&x, i)
+ a[i] = check.assignVar(lhs[i], &x)
+ }
+ check.recordCommaOkTypes(rhs[0], a)
+ return
+ }
+
+ for i, lhs := range lhs {
+ get(&x, i)
+ check.assignVar(lhs, &x)
+ }
+}
+
+func (check *Checker) shortVarDecl(pos token.Pos, lhs, rhs []ast.Expr) {
+ scope := check.scope
+
+ // collect lhs variables
+ var newVars []*Var
+ var lhsVars = make([]*Var, len(lhs))
+ for i, lhs := range lhs {
+ var obj *Var
+ if ident, _ := lhs.(*ast.Ident); ident != nil {
+ // Use the correct obj if the ident is redeclared. The
+ // variable's scope starts after the declaration; so we
+ // must use Scope.Lookup here and call Scope.Insert
+ // (via check.declare) later.
+ name := ident.Name
+ if alt := scope.Lookup(name); alt != nil {
+ // redeclared object must be a variable
+ if alt, _ := alt.(*Var); alt != nil {
+ obj = alt
+ } else {
+ check.errorf(lhs.Pos(), "cannot assign to %s", lhs)
+ }
+ check.recordUse(ident, alt)
+ } else {
+ // declare new variable, possibly a blank (_) variable
+ obj = NewVar(ident.Pos(), check.pkg, name, nil)
+ if name != "_" {
+ newVars = append(newVars, obj)
+ }
+ check.recordDef(ident, obj)
+ }
+ } else {
+ check.errorf(lhs.Pos(), "cannot declare %s", lhs)
+ }
+ if obj == nil {
+ obj = NewVar(lhs.Pos(), check.pkg, "_", nil) // dummy variable
+ }
+ lhsVars[i] = obj
+ }
+
+ check.initVars(lhsVars, rhs, token.NoPos)
+
+ // declare new variables
+ if len(newVars) > 0 {
+ for _, obj := range newVars {
+ check.declare(scope, nil, obj) // recordObject already called
+ }
+ } else {
+ check.softErrorf(pos, "no new variables on left side of :=")
+ }
+}
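+
+// For illustration, the redeclaration rule implemented by shortVarDecl permits, e.g.:
+//
+//  x := 1
+//  x, y := 2, 3 // ok: y is new; x, already declared in this scope, is only assigned
+//  x := 4       // error: no new variables on left side of :=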
diff --git a/llgo/third_party/go.tools/go/types/builtins.go b/llgo/third_party/go.tools/go/types/builtins.go
new file mode 100644
index 0000000000000000000000000000000000000000..ce2ba35027756b3b512eec577418070f767b9076
--- /dev/null
+++ b/llgo/third_party/go.tools/go/types/builtins.go
@@ -0,0 +1,628 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements typechecking of builtin function calls.
+
+package types
+
+import (
+ "go/ast"
+ "go/token"
+
+ "llvm.org/llgo/third_party/go.tools/go/exact"
+)
+
+// builtin type-checks a call to the built-in specified by id and
+// returns true if the call is valid, with *x holding the result;
+// but x.expr is not set. If the call is invalid, the result is
+// false, and *x is undefined.
+//
+func (check *Checker) builtin(x *operand, call *ast.CallExpr, id builtinId) (_ bool) {
+ // append is the only built-in that permits the use of ... for the last argument
+ bin := predeclaredFuncs[id]
+ if call.Ellipsis.IsValid() && id != _Append {
+ check.invalidOp(call.Ellipsis, "invalid use of ... with built-in %s", bin.name)
+ check.use(call.Args...)
+ return
+ }
+
+ // For len(x) and cap(x) we need to know if x contains any function calls or
+ // receive operations. Save/restore current setting and set hasCallOrRecv to
+ // false for the evaluation of x so that we can check it afterwards.
+ // Note: We must do this _before_ calling unpack because unpack evaluates the
+ // first argument before we even call arg(x, 0)!
+ if id == _Len || id == _Cap {
+ defer func(b bool) {
+ check.hasCallOrRecv = b
+ }(check.hasCallOrRecv)
+ check.hasCallOrRecv = false
+ }
+
+ // determine actual arguments
+ var arg getter
+ nargs := len(call.Args)
+ switch id {
+ default:
+ // make argument getter
+ arg, nargs, _ = unpack(func(x *operand, i int) { check.expr(x, call.Args[i]) }, nargs, false)
+ if arg == nil {
+ x.mode = invalid
+ return
+ }
+ // evaluate first argument, if present
+ if nargs > 0 {
+ arg(x, 0)
+ if x.mode == invalid {
+ return
+ }
+ }
+ case _Make, _New, _Offsetof, _Trace:
+ // arguments require special handling
+ }
+
+ // check argument count
+ {
+ msg := ""
+ if nargs < bin.nargs {
+ msg = "not enough"
+ } else if !bin.variadic && nargs > bin.nargs {
+ msg = "too many"
+ }
+ if msg != "" {
+ check.invalidOp(call.Rparen, "%s arguments for %s (expected %d, found %d)", msg, call, bin.nargs, nargs)
+ return
+ }
+ }
+
+ switch id {
+ case _Append:
+ // append(s S, x ...T) S, where T is the element type of S
+ // spec: "The variadic function append appends zero or more values x to s of type
+ // S, which must be a slice type, and returns the resulting slice, also of type S.
+ // The values x are passed to a parameter of type ...T where T is the element type
+ // of S and the respective parameter passing rules apply."
+ S := x.typ
+ var T Type
+ if s, _ := S.Underlying().(*Slice); s != nil {
+ T = s.elem
+ } else {
+ check.invalidArg(x.pos(), "%s is not a slice", x)
+ return
+ }
+
+ // remember arguments that have been evaluated already
+ alist := []operand{*x}
+
+ // spec: "As a special case, append also accepts a first argument assignable
+ // to type []byte with a second argument of string type followed by ... .
+ // This form appends the bytes of the string."
+ if nargs == 2 && call.Ellipsis.IsValid() && x.assignableTo(check.conf, NewSlice(UniverseByte)) {
+ arg(x, 1)
+ if x.mode == invalid {
+ return
+ }
+ if isString(x.typ) {
+ if check.Types != nil {
+ sig := makeSig(S, S, x.typ)
+ sig.variadic = true
+ check.recordBuiltinType(call.Fun, sig)
+ }
+ x.mode = value
+ x.typ = S
+ break
+ }
+ alist = append(alist, *x)
+ // fallthrough
+ }
+
+ // check general case by creating custom signature
+ sig := makeSig(S, S, NewSlice(T)) // []T required for variadic signature
+ sig.variadic = true
+ check.arguments(x, call, sig, func(x *operand, i int) {
+ // only evaluate arguments that have not been evaluated before
+ if i < len(alist) {
+ *x = alist[i]
+ return
+ }
+ arg(x, i)
+ }, nargs)
+ // ok to continue even if check.arguments reported errors
+
+ x.mode = value
+ x.typ = S
+ if check.Types != nil {
+ check.recordBuiltinType(call.Fun, sig)
+ }
+
+ case _Cap, _Len:
+ // cap(x)
+ // len(x)
+ mode := invalid
+ var typ Type
+ var val exact.Value
+ switch typ = implicitArrayDeref(x.typ.Underlying()); t := typ.(type) {
+ case *Basic:
+ if isString(t) && id == _Len {
+ if x.mode == constant {
+ mode = constant
+ val = exact.MakeInt64(int64(len(exact.StringVal(x.val))))
+ } else {
+ mode = value
+ }
+ }
+
+ case *Array:
+ mode = value
+ // spec: "The expressions len(s) and cap(s) are constants
+ // if the type of s is an array or pointer to an array and
+ // the expression s does not contain channel receives or
+ // function calls; in this case s is not evaluated."
+ if !check.hasCallOrRecv {
+ mode = constant
+ val = exact.MakeInt64(t.len)
+ }
+
+ case *Slice, *Chan:
+ mode = value
+
+ case *Map:
+ if id == _Len {
+ mode = value
+ }
+ }
+
+ if mode == invalid {
+ check.invalidArg(x.pos(), "%s for %s", x, bin.name)
+ return
+ }
+
+ x.mode = mode
+ x.typ = Typ[Int]
+ x.val = val
+ if check.Types != nil && mode != constant {
+ check.recordBuiltinType(call.Fun, makeSig(x.typ, typ))
+ }
+
+ case _Close:
+ // close(c)
+ c, _ := x.typ.Underlying().(*Chan)
+ if c == nil {
+ check.invalidArg(x.pos(), "%s is not a channel", x)
+ return
+ }
+ if c.dir == RecvOnly {
+ check.invalidArg(x.pos(), "%s must not be a receive-only channel", x)
+ return
+ }
+
+ x.mode = novalue
+ if check.Types != nil {
+ check.recordBuiltinType(call.Fun, makeSig(nil, c))
+ }
+
+ case _Complex:
+ // complex(x, y realT) complexT
+ if !check.complexArg(x) {
+ return
+ }
+
+ var y operand
+ arg(&y, 1)
+ if y.mode == invalid {
+ return
+ }
+ if !check.complexArg(&y) {
+ return
+ }
+
+ check.convertUntyped(x, y.typ)
+ if x.mode == invalid {
+ return
+ }
+ check.convertUntyped(&y, x.typ)
+ if y.mode == invalid {
+ return
+ }
+
+ if !Identical(x.typ, y.typ) {
+ check.invalidArg(x.pos(), "mismatched types %s and %s", x.typ, y.typ)
+ return
+ }
+
+ if x.mode == constant && y.mode == constant {
+ x.val = exact.BinaryOp(x.val, token.ADD, exact.MakeImag(y.val))
+ } else {
+ x.mode = value
+ }
+
+ realT := x.typ
+ complexT := Typ[Invalid]
+ switch realT.Underlying().(*Basic).kind {
+ case Float32:
+ complexT = Typ[Complex64]
+ case Float64:
+ complexT = Typ[Complex128]
+ case UntypedInt, UntypedRune, UntypedFloat:
+ if x.mode == constant {
+ realT = defaultType(realT).(*Basic)
+ complexT = Typ[UntypedComplex]
+ } else {
+ // untyped but not constant; probably because one
+ // operand is a non-constant shift of untyped lhs
+ realT = Typ[Float64]
+ complexT = Typ[Complex128]
+ }
+ default:
+ check.invalidArg(x.pos(), "float32 or float64 arguments expected")
+ return
+ }
+
+ x.typ = complexT
+ if check.Types != nil && x.mode != constant {
+ check.recordBuiltinType(call.Fun, makeSig(complexT, realT, realT))
+ }
+
+ if x.mode != constant {
+ // The arguments have now their final types, which at run-
+ // time will be materialized. Update the expression trees.
+ // If the current types are untyped, the materialized type
+ // is the respective default type.
+ // (If the result is constant, the arguments are never
+ // materialized and there is nothing to do.)
+ check.updateExprType(x.expr, realT, true)
+ check.updateExprType(y.expr, realT, true)
+ }
+
+ case _Copy:
+ // copy(x, y []T) int
+ var dst Type
+ if t, _ := x.typ.Underlying().(*Slice); t != nil {
+ dst = t.elem
+ }
+
+ var y operand
+ arg(&y, 1)
+ if y.mode == invalid {
+ return
+ }
+ var src Type
+ switch t := y.typ.Underlying().(type) {
+ case *Basic:
+ if isString(y.typ) {
+ src = UniverseByte
+ }
+ case *Slice:
+ src = t.elem
+ }
+
+ if dst == nil || src == nil {
+ check.invalidArg(x.pos(), "copy expects slice arguments; found %s and %s", x, &y)
+ return
+ }
+
+ if !Identical(dst, src) {
+ check.invalidArg(x.pos(), "arguments to copy %s and %s have different element types %s and %s", x, &y, dst, src)
+ return
+ }
+
+ if check.Types != nil {
+ check.recordBuiltinType(call.Fun, makeSig(Typ[Int], x.typ, y.typ))
+ }
+ x.mode = value
+ x.typ = Typ[Int]
+
+ case _Delete:
+ // delete(m, k)
+ m, _ := x.typ.Underlying().(*Map)
+ if m == nil {
+ check.invalidArg(x.pos(), "%s is not a map", x)
+ return
+ }
+ arg(x, 1) // k
+ if x.mode == invalid {
+ return
+ }
+
+ if !x.assignableTo(check.conf, m.key) {
+ check.invalidArg(x.pos(), "%s is not assignable to %s", x, m.key)
+ return
+ }
+
+ x.mode = novalue
+ if check.Types != nil {
+ check.recordBuiltinType(call.Fun, makeSig(nil, m, m.key))
+ }
+
+ case _Imag, _Real:
+ // imag(complexT) realT
+ // real(complexT) realT
+ if !isComplex(x.typ) {
+ check.invalidArg(x.pos(), "%s must be a complex number", x)
+ return
+ }
+ if x.mode == constant {
+ if id == _Real {
+ x.val = exact.Real(x.val)
+ } else {
+ x.val = exact.Imag(x.val)
+ }
+ } else {
+ x.mode = value
+ }
+ var k BasicKind
+ switch x.typ.Underlying().(*Basic).kind {
+ case Complex64:
+ k = Float32
+ case Complex128:
+ k = Float64
+ case UntypedComplex:
+ k = UntypedFloat
+ default:
+ unreachable()
+ }
+
+ if check.Types != nil && x.mode != constant {
+ check.recordBuiltinType(call.Fun, makeSig(Typ[k], x.typ))
+ }
+ x.typ = Typ[k]
+
+ case _Make:
+ // make(T, n)
+ // make(T, n, m)
+ // (no argument evaluated yet)
+ arg0 := call.Args[0]
+ T := check.typ(arg0)
+ if T == Typ[Invalid] {
+ return
+ }
+
+ var min int // minimum number of arguments
+ switch T.Underlying().(type) {
+ case *Slice:
+ min = 2
+ case *Map, *Chan:
+ min = 1
+ default:
+ check.invalidArg(arg0.Pos(), "cannot make %s; type must be slice, map, or channel", arg0)
+ return
+ }
+ if nargs < min || min+1 < nargs {
+ check.errorf(call.Pos(), "%s expects %d or %d arguments; found %d", call, min, min+1, nargs)
+ return
+ }
+ var sizes []int64 // constant integer arguments, if any
+ for _, arg := range call.Args[1:] {
+ if s, ok := check.index(arg, -1); ok && s >= 0 {
+ sizes = append(sizes, s)
+ }
+ }
+ if len(sizes) == 2 && sizes[0] > sizes[1] {
+ check.invalidArg(call.Args[1].Pos(), "length and capacity swapped")
+ // safe to continue
+ }
+ x.mode = value
+ x.typ = T
+ if check.Types != nil {
+ params := [...]Type{T, Typ[Int], Typ[Int]}
+ check.recordBuiltinType(call.Fun, makeSig(x.typ, params[:1+len(sizes)]...))
+ }
+
+ case _New:
+ // new(T)
+ // (no argument evaluated yet)
+ T := check.typ(call.Args[0])
+ if T == Typ[Invalid] {
+ return
+ }
+
+ x.mode = value
+ x.typ = &Pointer{base: T}
+ if check.Types != nil {
+ check.recordBuiltinType(call.Fun, makeSig(x.typ, T))
+ }
+
+ case _Panic:
+ // panic(x)
+ T := new(Interface)
+ if !check.assignment(x, T) {
+ assert(x.mode == invalid)
+ return
+ }
+
+ x.mode = novalue
+ if check.Types != nil {
+ check.recordBuiltinType(call.Fun, makeSig(nil, T))
+ }
+
+ case _Print, _Println:
+ // print(x, y, ...)
+ // println(x, y, ...)
+ var params []Type
+ if nargs > 0 {
+ params = make([]Type, nargs)
+ for i := 0; i < nargs; i++ {
+ if i > 0 {
+ arg(x, i) // first argument already evaluated
+ }
+ if !check.assignment(x, nil) {
+ assert(x.mode == invalid)
+ return
+ }
+ params[i] = x.typ
+ }
+ }
+
+ x.mode = novalue
+ if check.Types != nil {
+ check.recordBuiltinType(call.Fun, makeSig(nil, params...))
+ }
+
+ case _Recover:
+ // recover() interface{}
+ x.mode = value
+ x.typ = new(Interface)
+ if check.Types != nil {
+ check.recordBuiltinType(call.Fun, makeSig(x.typ))
+ }
+
+ case _Alignof:
+ // unsafe.Alignof(x T) uintptr
+ if !check.assignment(x, nil) {
+ assert(x.mode == invalid)
+ return
+ }
+
+ x.mode = constant
+ x.val = exact.MakeInt64(check.conf.alignof(x.typ))
+ x.typ = Typ[Uintptr]
+ // result is constant - no need to record signature
+
+ case _Offsetof:
+ // unsafe.Offsetof(x T) uintptr, where x must be a selector
+ // (no argument evaluated yet)
+ arg0 := call.Args[0]
+ selx, _ := unparen(arg0).(*ast.SelectorExpr)
+ if selx == nil {
+ check.invalidArg(arg0.Pos(), "%s is not a selector expression", arg0)
+ check.use(arg0)
+ return
+ }
+
+ check.expr(x, selx.X)
+ if x.mode == invalid {
+ return
+ }
+
+ base := derefStructPtr(x.typ)
+ sel := selx.Sel.Name
+ obj, index, indirect := LookupFieldOrMethod(base, false, check.pkg, sel)
+ switch obj.(type) {
+ case nil:
+ check.invalidArg(x.pos(), "%s has no single field %s", base, sel)
+ return
+ case *Func:
+ // TODO(gri) Using derefStructPtr may result in methods being found
+ // that don't actually exist. An error either way, but the error
+ // message is confusing. See: http://play.golang.org/p/al75v23kUy ,
+ // but go/types reports: "invalid argument: x.m is a method value".
+ check.invalidArg(arg0.Pos(), "%s is a method value", arg0)
+ return
+ }
+ if indirect {
+ check.invalidArg(x.pos(), "field %s is embedded via a pointer in %s", sel, base)
+ return
+ }
+
+ // TODO(gri) Should we pass x.typ instead of base (and indirect report if derefStructPtr indirected)?
+ check.recordSelection(selx, FieldVal, base, obj, index, false)
+
+ offs := check.conf.offsetof(base, index)
+ x.mode = constant
+ x.val = exact.MakeInt64(offs)
+ x.typ = Typ[Uintptr]
+ // result is constant - no need to record signature
+
+ case _Sizeof:
+ // unsafe.Sizeof(x T) uintptr
+ if !check.assignment(x, nil) {
+ assert(x.mode == invalid)
+ return
+ }
+
+ x.mode = constant
+ x.val = exact.MakeInt64(check.conf.sizeof(x.typ))
+ x.typ = Typ[Uintptr]
+ // result is constant - no need to record signature
+
+ case _Assert:
+ // assert(pred) causes a typechecker error if pred is false.
+ // The result of assert is the value of pred if there is no error.
+ // Note: assert is only available in self-test mode.
+ if x.mode != constant || !isBoolean(x.typ) {
+ check.invalidArg(x.pos(), "%s is not a boolean constant", x)
+ return
+ }
+ if x.val.Kind() != exact.Bool {
+ check.errorf(x.pos(), "internal error: value of %s should be a boolean constant", x)
+ return
+ }
+ if !exact.BoolVal(x.val) {
+ check.errorf(call.Pos(), "%s failed", call)
+ // compile-time assertion failure - safe to continue
+ }
+ // result is constant - no need to record signature
+
+ case _Trace:
+ // trace(x, y, z, ...) dumps the positions, expressions, and
+ // values of its arguments. The result of trace is the value
+ // of the first argument.
+ // Note: trace is only available in self-test mode.
+ // (no argument evaluated yet)
+ if nargs == 0 {
+ check.dump("%s: trace() without arguments", call.Pos())
+ x.mode = novalue
+ break
+ }
+ var t operand
+ x1 := x
+ for _, arg := range call.Args {
+ check.rawExpr(x1, arg, nil) // permit trace for types, e.g.: new(trace(T))
+ check.dump("%s: %s", x1.pos(), x1)
+ x1 = &t // use incoming x only for first argument
+ }
+ // trace is only available in test mode - no need to record signature
+
+ default:
+ unreachable()
+ }
+
+ return true
+}
+
+// makeSig makes a signature for the given argument and result types.
+// Default types are used for untyped arguments, and res may be nil.
+func makeSig(res Type, args ...Type) *Signature {
+ list := make([]*Var, len(args))
+ for i, param := range args {
+ list[i] = NewVar(token.NoPos, nil, "", defaultType(param))
+ }
+ params := NewTuple(list...)
+ var result *Tuple
+ if res != nil {
+ assert(!isUntyped(res))
+ result = NewTuple(NewVar(token.NoPos, nil, "", res))
+ }
+ return &Signature{params: params, results: result}
+}
+
+// implicitArrayDeref returns A if typ is of the form *A and A is an array;
+// otherwise it returns typ.
+//
+func implicitArrayDeref(typ Type) Type {
+ if p, ok := typ.(*Pointer); ok {
+ if a, ok := p.base.Underlying().(*Array); ok {
+ return a
+ }
+ }
+ return typ
+}
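+
+// For illustration: given a hypothetical var p *[10]int, len(p) and cap(p)
+// are valid and evaluate to 10; implicitArrayDeref is what reduces the
+// pointer-to-array operand to the *Array case in the len/cap handling above.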
+
+// unparen removes any parentheses surrounding an expression and returns
+// the naked expression.
+//
+func unparen(x ast.Expr) ast.Expr {
+ if p, ok := x.(*ast.ParenExpr); ok {
+ return unparen(p.X)
+ }
+ return x
+}
+
+func (check *Checker) complexArg(x *operand) bool {
+ t, _ := x.typ.Underlying().(*Basic)
+ if t != nil && (t.info&IsFloat != 0 || t.kind == UntypedInt || t.kind == UntypedRune) {
+ return true
+ }
+ check.invalidArg(x.pos(), "%s must be a float32, float64, or an untyped non-complex numeric constant", x)
+ return false
+}
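+
+// For illustration, hypothetical calls showing which arguments complexArg admits:
+//
+//  complex(float32(1), 2) // complex64: float32 operand, untyped constant 2
+//  complex(1, 2)          // untyped complex constant
+//  complex(1, 2i)         // error: 2i is an untyped complex constant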
diff --git a/llgo/third_party/go.tools/go/types/builtins_test.go b/llgo/third_party/go.tools/go/types/builtins_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..d1e9eceae5df1f832d61d8302809be0ddb441605
--- /dev/null
+++ b/llgo/third_party/go.tools/go/types/builtins_test.go
@@ -0,0 +1,204 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types_test
+
+import (
+ "fmt"
+ "go/ast"
+ "go/parser"
+ "testing"
+
+ _ "llvm.org/llgo/third_party/go.tools/go/gcimporter"
+ . "llvm.org/llgo/third_party/go.tools/go/types"
+)
+
+var builtinCalls = []struct {
+ name, src, sig string
+}{
+ {"append", `var s []int; _ = append(s)`, `func([]int, ...int) []int`},
+ {"append", `var s []int; _ = append(s, 0)`, `func([]int, ...int) []int`},
+ {"append", `var s []int; _ = (append)(s, 0)`, `func([]int, ...int) []int`},
+ {"append", `var s []byte; _ = ((append))(s, 0)`, `func([]byte, ...byte) []byte`},
+ {"append", `var s []byte; _ = append(s, "foo"...)`, `func([]byte, string...) []byte`},
+ {"append", `type T []byte; var s T; var str string; _ = append(s, str...)`, `func(p.T, string...) p.T`},
+ {"append", `type T []byte; type U string; var s T; var str U; _ = append(s, str...)`, `func(p.T, p.U...) p.T`},
+
+ {"cap", `var s [10]int; _ = cap(s)`, `invalid type`}, // constant
+ {"cap", `var s [10]int; _ = cap(&s)`, `invalid type`}, // constant
+ {"cap", `var s []int64; _ = cap(s)`, `func([]int64) int`},
+ {"cap", `var c chan<-bool; _ = cap(c)`, `func(chan<- bool) int`},
+
+ {"len", `_ = len("foo")`, `invalid type`}, // constant
+ {"len", `var s string; _ = len(s)`, `func(string) int`},
+ {"len", `var s [10]int; _ = len(s)`, `invalid type`}, // constant
+ {"len", `var s [10]int; _ = len(&s)`, `invalid type`}, // constant
+ {"len", `var s []int64; _ = len(s)`, `func([]int64) int`},
+ {"len", `var c chan<-bool; _ = len(c)`, `func(chan<- bool) int`},
+ {"len", `var m map[string]float32; _ = len(m)`, `func(map[string]float32) int`},
+
+ {"close", `var c chan int; close(c)`, `func(chan int)`},
+ {"close", `var c chan<- chan string; close(c)`, `func(chan<- chan string)`},
+
+ {"complex", `_ = complex(1, 0)`, `invalid type`}, // constant
+ {"complex", `var re float32; _ = complex(re, 1.0)`, `func(float32, float32) complex64`},
+ {"complex", `var im float64; _ = complex(1, im)`, `func(float64, float64) complex128`},
+ {"complex", `type F32 float32; var re, im F32; _ = complex(re, im)`, `func(p.F32, p.F32) complex64`},
+ {"complex", `type F64 float64; var re, im F64; _ = complex(re, im)`, `func(p.F64, p.F64) complex128`},
+
+ {"copy", `var src, dst []byte; copy(dst, src)`, `func([]byte, []byte) int`},
+ {"copy", `type T [][]int; var src, dst T; _ = copy(dst, src)`, `func(p.T, p.T) int`},
+ {"copy", `var src string; var dst []byte; copy(dst, src)`, `func([]byte, string) int`},
+ {"copy", `type T string; type U []byte; var src T; var dst U; copy(dst, src)`, `func(p.U, p.T) int`},
+ {"copy", `var dst []byte; copy(dst, "hello")`, `func([]byte, string) int`},
+
+ {"delete", `var m map[string]bool; delete(m, "foo")`, `func(map[string]bool, string)`},
+ {"delete", `type (K string; V int); var m map[K]V; delete(m, "foo")`, `func(map[p.K]p.V, p.K)`},
+
+ {"imag", `_ = imag(1i)`, `invalid type`}, // constant
+ {"imag", `var c complex64; _ = imag(c)`, `func(complex64) float32`},
+ {"imag", `var c complex128; _ = imag(c)`, `func(complex128) float64`},
+ {"imag", `type C64 complex64; var c C64; _ = imag(c)`, `func(p.C64) float32`},
+ {"imag", `type C128 complex128; var c C128; _ = imag(c)`, `func(p.C128) float64`},
+
+ {"real", `_ = real(1i)`, `invalid type`}, // constant
+ {"real", `var c complex64; _ = real(c)`, `func(complex64) float32`},
+ {"real", `var c complex128; _ = real(c)`, `func(complex128) float64`},
+ {"real", `type C64 complex64; var c C64; _ = real(c)`, `func(p.C64) float32`},
+ {"real", `type C128 complex128; var c C128; _ = real(c)`, `func(p.C128) float64`},
+
+ {"make", `_ = make([]int, 10)`, `func([]int, int) []int`},
+ {"make", `type T []byte; _ = make(T, 10, 20)`, `func(p.T, int, int) p.T`},
+
+ {"new", `_ = new(int)`, `func(int) *int`},
+ {"new", `type T struct{}; _ = new(T)`, `func(p.T) *p.T`},
+
+ {"panic", `panic(0)`, `func(interface{})`},
+ {"panic", `panic("foo")`, `func(interface{})`},
+
+ {"print", `print()`, `func()`},
+ {"print", `print(0)`, `func(int)`},
+ {"print", `print(1, 2.0, "foo", true)`, `func(int, float64, string, bool)`},
+
+ {"println", `println()`, `func()`},
+ {"println", `println(0)`, `func(int)`},
+ {"println", `println(1, 2.0, "foo", true)`, `func(int, float64, string, bool)`},
+
+ {"recover", `recover()`, `func() interface{}`},
+ {"recover", `_ = recover()`, `func() interface{}`},
+
+ {"Alignof", `_ = unsafe.Alignof(0)`, `invalid type`}, // constant
+ {"Alignof", `var x struct{}; _ = unsafe.Alignof(x)`, `invalid type`}, // constant
+
+ {"Offsetof", `var x struct{f bool}; _ = unsafe.Offsetof(x.f)`, `invalid type`}, // constant
+ {"Offsetof", `var x struct{_ int; f bool}; _ = unsafe.Offsetof((&x).f)`, `invalid type`}, // constant
+
+ {"Sizeof", `_ = unsafe.Sizeof(0)`, `invalid type`}, // constant
+ {"Sizeof", `var x struct{}; _ = unsafe.Sizeof(x)`, `invalid type`}, // constant
+
+ {"assert", `assert(true)`, `invalid type`}, // constant
+ {"assert", `type B bool; const pred B = 1 < 2; assert(pred)`, `invalid type`}, // constant
+
+ // no tests for trace since it produces output as a side-effect
+}
+
+func TestBuiltinSignatures(t *testing.T) {
+ DefPredeclaredTestFuncs()
+
+ seen := map[string]bool{"trace": true} // no test for trace built-in; add it manually
+ for _, call := range builtinCalls {
+ testBuiltinSignature(t, call.name, call.src, call.sig)
+ seen[call.name] = true
+ }
+
+ // make sure we didn't miss one
+ for _, name := range Universe.Names() {
+ if _, ok := Universe.Lookup(name).(*Builtin); ok && !seen[name] {
+ t.Errorf("missing test for %s", name)
+ }
+ }
+ for _, name := range Unsafe.Scope().Names() {
+ if _, ok := Unsafe.Scope().Lookup(name).(*Builtin); ok && !seen[name] {
+ t.Errorf("missing test for unsafe.%s", name)
+ }
+ }
+}
+
+func testBuiltinSignature(t *testing.T, name, src0, want string) {
+ src := fmt.Sprintf(`package p; import "unsafe"; type _ unsafe.Pointer /* use unsafe */; func _() { %s }`, src0)
+ f, err := parser.ParseFile(fset, "", src, 0)
+ if err != nil {
+ t.Errorf("%s: %s", src0, err)
+ return
+ }
+
+ var conf Config
+ uses := make(map[*ast.Ident]Object)
+ types := make(map[ast.Expr]TypeAndValue)
+ _, err = conf.Check(f.Name.Name, fset, []*ast.File{f}, &Info{Uses: uses, Types: types})
+ if err != nil {
+ t.Errorf("%s: %s", src0, err)
+ return
+ }
+
+ // find called function
+ n := 0
+ var fun ast.Expr
+ for x := range types {
+ if call, _ := x.(*ast.CallExpr); call != nil {
+ fun = call.Fun
+ n++
+ }
+ }
+ if n != 1 {
+ t.Errorf("%s: got %d CallExprs; want 1", src0, n)
+ return
+ }
+
+ // check recorded types for fun and its descendants (fun may be parenthesized)
+ for {
+ // the recorded type for the built-in must match the wanted signature
+ typ := types[fun].Type
+ if typ == nil {
+ t.Errorf("%s: no type recorded for %s", src0, ExprString(fun))
+ return
+ }
+ if got := typ.String(); got != want {
+ t.Errorf("%s: got type %s; want %s", src0, got, want)
+ return
+ }
+
+ // called function must be a (possibly parenthesized, qualified)
+ // identifier denoting the expected built-in
+ switch p := fun.(type) {
+ case *ast.Ident:
+ obj := uses[p]
+ if obj == nil {
+ t.Errorf("%s: no object found for %s", src0, p)
+ return
+ }
+ bin, _ := obj.(*Builtin)
+ if bin == nil {
+ t.Errorf("%s: %s does not denote a built-in", src0, p)
+ return
+ }
+ if bin.Name() != name {
+ t.Errorf("%s: got built-in %s; want %s", src0, bin.Name(), name)
+ return
+ }
+ return // we're done
+
+ case *ast.ParenExpr:
+ fun = p.X // unpack
+
+ case *ast.SelectorExpr:
+ // built-in from package unsafe - ignore details
+ return // we're done
+
+ default:
+ t.Errorf("%s: invalid function call", src0)
+ return
+ }
+ }
+}
diff --git a/llgo/third_party/go.tools/go/types/call.go b/llgo/third_party/go.tools/go/types/call.go
new file mode 100644
index 0000000000000000000000000000000000000000..a392d9106d58a8dbab9158398751ddf28797fd5a
--- /dev/null
+++ b/llgo/third_party/go.tools/go/types/call.go
@@ -0,0 +1,427 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements typechecking of call and selector expressions.
+
+package types
+
+import (
+ "go/ast"
+ "go/token"
+)
+
+func (check *Checker) call(x *operand, e *ast.CallExpr) exprKind {
+ check.exprOrType(x, e.Fun)
+
+ switch x.mode {
+ case invalid:
+ check.use(e.Args...)
+ x.mode = invalid
+ x.expr = e
+ return statement
+
+ case typexpr:
+ // conversion
+ T := x.typ
+ x.mode = invalid
+ switch n := len(e.Args); n {
+ case 0:
+ check.errorf(e.Rparen, "missing argument in conversion to %s", T)
+ case 1:
+ check.expr(x, e.Args[0])
+ if x.mode != invalid {
+ check.conversion(x, T)
+ }
+ default:
+ check.errorf(e.Args[n-1].Pos(), "too many arguments in conversion to %s", T)
+ }
+ x.expr = e
+ return conversion
+
+ case builtin:
+ id := x.id
+ if !check.builtin(x, e, id) {
+ x.mode = invalid
+ }
+ x.expr = e
+ // a non-constant result implies a function call
+ if x.mode != invalid && x.mode != constant {
+ check.hasCallOrRecv = true
+ }
+ return predeclaredFuncs[id].kind
+
+ default:
+ // function/method call
+ sig, _ := x.typ.Underlying().(*Signature)
+ if sig == nil {
+ check.invalidOp(x.pos(), "cannot call non-function %s", x)
+ x.mode = invalid
+ x.expr = e
+ return statement
+ }
+
+ arg, n, _ := unpack(func(x *operand, i int) { check.expr(x, e.Args[i]) }, len(e.Args), false)
+ if arg == nil {
+ x.mode = invalid
+ x.expr = e
+ return statement
+ }
+
+ check.arguments(x, e, sig, arg, n)
+
+ // determine result
+ switch sig.results.Len() {
+ case 0:
+ x.mode = novalue
+ case 1:
+ x.mode = value
+ x.typ = sig.results.vars[0].typ // unpack tuple
+ default:
+ x.mode = value
+ x.typ = sig.results
+ }
+ x.expr = e
+ check.hasCallOrRecv = true
+
+ return statement
+ }
+}
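+
+// For illustration, the expression kinds distinguished by call above are, e.g.:
+//
+//  T(x)   // x.mode == typexpr: a conversion
+//  len(x) // x.mode == builtin: a built-in call
+//  f(x)   // default case: an ordinary function or method call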
+
+// use type-checks each argument.
+// Useful to make sure expressions are evaluated
+// (and variables are "used") in the presence of other errors.
+func (check *Checker) use(arg ...ast.Expr) {
+ var x operand
+ for _, e := range arg {
+ check.rawExpr(&x, e, nil)
+ }
+}
+
+// useGetter is like use, but takes a getter instead of a list of expressions.
+// It should be called instead of use if a getter is present to avoid repeated
+// evaluation of the first argument (since the getter was likely obtained via
+// unpack, which may have evaluated the first argument already).
+func (check *Checker) useGetter(get getter, n int) {
+ var x operand
+ for i := 0; i < n; i++ {
+ get(&x, i)
+ }
+}
+
+// A getter sets x as the i'th operand, where 0 <= i < n and n is the total
+// number of operands (context-specific, and maintained elsewhere). A getter
+// type-checks the i'th operand; the details of the actual check are getter-
+// specific.
+type getter func(x *operand, i int)
+
+// unpack takes a getter get and a number of operands n. If n == 1, unpack
+// calls the incoming getter for the first operand. If that operand is
+// invalid, unpack returns (nil, 0, false). Otherwise, if that operand is a
+// function call, or a comma-ok expression and allowCommaOk is set, the result
+// is a new getter and operand count providing access to the function results,
+// or comma-ok values, respectively. The third result value reports if it
+// is indeed the comma-ok case. In all other cases, the incoming getter and
+// operand count are returned unchanged, and the third result value is false.
+//
+// In other words, if there's exactly one operand that - after type-checking
+// by calling get - stands for multiple operands, the resulting getter provides
+// access to those operands instead.
+//
+// If the returned getter is called at most once for a given operand index i
+// (including i == 0), that operand is guaranteed to cause only one call of
+// the incoming getter with that i.
+//
+func unpack(get getter, n int, allowCommaOk bool) (getter, int, bool) {
+ if n == 1 {
+ // possibly result of an n-valued function call or comma,ok value
+ var x0 operand
+ get(&x0, 0)
+ if x0.mode == invalid {
+ return nil, 0, false
+ }
+
+ if t, ok := x0.typ.(*Tuple); ok {
+ // result of an n-valued function call
+ return func(x *operand, i int) {
+ x.mode = value
+ x.expr = x0.expr
+ x.typ = t.At(i).typ
+ }, t.Len(), false
+ }
+
+ if x0.mode == mapindex || x0.mode == commaok {
+ // comma-ok value
+ if allowCommaOk {
+ a := [2]Type{x0.typ, Typ[UntypedBool]}
+ return func(x *operand, i int) {
+ x.mode = value
+ x.expr = x0.expr
+ x.typ = a[i]
+ }, 2, true
+ }
+ x0.mode = value
+ }
+
+ // single value
+ return func(x *operand, i int) {
+ if i != 0 {
+ unreachable()
+ }
+ *x = x0
+ }, 1, false
+ }
+
+ // zero or multiple values
+ return get, n, false
+}
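+
+// For illustration, hypothetical right-hand sides and how unpack treats them:
+//
+//  a, b = f()   // one operand of tuple type: the new getter yields two operands
+//  v, ok = m[k] // one comma-ok operand (with allowCommaOk set): two operands, commaOk true
+//  a, b = x, y  // two operands: getter and count are returned unchanged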
+
+// arguments checks argument passing for the call with the given signature.
+// The arg function provides the operand for the i'th argument.
+func (check *Checker) arguments(x *operand, call *ast.CallExpr, sig *Signature, arg getter, n int) {
+ passSlice := false
+ if call.Ellipsis.IsValid() {
+ // last argument is of the form x...
+ if sig.variadic {
+ passSlice = true
+ } else {
+ check.errorf(call.Ellipsis, "cannot use ... in call to non-variadic %s", call.Fun)
+ // ok to continue
+ }
+ }
+
+ // evaluate arguments
+ for i := 0; i < n; i++ {
+ arg(x, i)
+ if x.mode != invalid {
+ check.argument(sig, i, x, passSlice && i == n-1)
+ }
+ }
+
+ // check argument count
+ if sig.variadic {
+ // a variadic function accepts an "empty"
+ // last argument: count one extra
+ n++
+ }
+ if n < sig.params.Len() {
+ check.errorf(call.Rparen, "too few arguments in call to %s", call.Fun)
+ // ok to continue
+ }
+}
+
+// argument checks passing of argument x to the i'th parameter of the given signature.
+// If passSlice is set, the argument is followed by ... in the call.
+func (check *Checker) argument(sig *Signature, i int, x *operand, passSlice bool) {
+ n := sig.params.Len()
+
+ // determine parameter type
+ var typ Type
+ switch {
+ case i < n:
+ typ = sig.params.vars[i].typ
+ case sig.variadic:
+ typ = sig.params.vars[n-1].typ
+ if debug {
+ if _, ok := typ.(*Slice); !ok {
+ check.dump("%s: expected unnamed slice type, got %s", sig.params.vars[n-1].Pos(), typ)
+ }
+ }
+ default:
+ check.errorf(x.pos(), "too many arguments")
+ return
+ }
+
+ if passSlice {
+ // argument is of the form x...
+ if i != n-1 {
+ check.errorf(x.pos(), "can only use ... with matching parameter")
+ return
+ }
+ if _, ok := x.typ.Underlying().(*Slice); !ok {
+ check.errorf(x.pos(), "cannot use %s as parameter of type %s", x, typ)
+ return
+ }
+ } else if sig.variadic && i >= n-1 {
+ // use the variadic parameter slice's element type
+ typ = typ.(*Slice).elem
+ }
+
+ if !check.assignment(x, typ) && x.mode != invalid {
+ check.errorf(x.pos(), "cannot pass argument %s to parameter of type %s", x, typ)
+ }
+}
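+
+// For illustration, with a hypothetical variadic signature func f(a int, rest ...string):
+//
+//  f(1, "x", "y") // "x" and "y" are checked against the element type string
+//  f(1, s...)     // s must have an underlying slice type assignable to []string
+//  f(1, 2)        // error: 2 is not assignable to string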
+
+func (check *Checker) selector(x *operand, e *ast.SelectorExpr) {
+ // these must be declared before the "goto Error" statements
+ var (
+ obj Object
+ index []int
+ indirect bool
+ )
+
+ sel := e.Sel.Name
+ // If the identifier refers to a package, handle everything here
+ // so we don't need a "package" mode for operands: package names
+ // can only appear in qualified identifiers which are mapped to
+ // selector expressions.
+ if ident, ok := e.X.(*ast.Ident); ok {
+ _, obj := check.scope.LookupParent(ident.Name)
+ if pkg, _ := obj.(*PkgName); pkg != nil {
+ assert(pkg.pkg == check.pkg)
+ check.recordUse(ident, pkg)
+ pkg.used = true
+ exp := pkg.imported.scope.Lookup(sel)
+ if exp == nil {
+ if !pkg.imported.fake {
+ check.errorf(e.Pos(), "%s not declared by package %s", sel, ident)
+ }
+ goto Error
+ }
+ if !exp.Exported() {
+ check.errorf(e.Pos(), "%s not exported by package %s", sel, ident)
+ // ok to continue
+ }
+ check.recordUse(e.Sel, exp)
+ // Simplified version of the code for *ast.Idents:
+ // - imported objects are always fully initialized
+ switch exp := exp.(type) {
+ case *Const:
+ assert(exp.Val() != nil)
+ x.mode = constant
+ x.typ = exp.typ
+ x.val = exp.val
+ case *TypeName:
+ x.mode = typexpr
+ x.typ = exp.typ
+ case *Var:
+ x.mode = variable
+ x.typ = exp.typ
+ case *Func:
+ x.mode = value
+ x.typ = exp.typ
+ case *Builtin:
+ x.mode = builtin
+ x.typ = exp.typ
+ x.id = exp.id
+ default:
+ unreachable()
+ }
+ x.expr = e
+ return
+ }
+ }
+
+ check.exprOrType(x, e.X)
+ if x.mode == invalid {
+ goto Error
+ }
+
+ obj, index, indirect = LookupFieldOrMethod(x.typ, x.mode == variable, check.pkg, sel)
+ if obj == nil {
+ switch {
+ case index != nil:
+ // TODO(gri) should provide actual type where the conflict happens
+ check.invalidOp(e.Pos(), "ambiguous selector %s", sel)
+ case indirect:
+ check.invalidOp(e.Pos(), "%s is not in method set of %s", sel, x.typ)
+ default:
+ check.invalidOp(e.Pos(), "%s has no field or method %s", x, sel)
+ }
+ goto Error
+ }
+
+ if x.mode == typexpr {
+ // method expression
+ m, _ := obj.(*Func)
+ if m == nil {
+ check.invalidOp(e.Pos(), "%s has no method %s", x, sel)
+ goto Error
+ }
+
+ check.recordSelection(e, MethodExpr, x.typ, m, index, indirect)
+
+ // the receiver type becomes the type of the first function
+ // argument of the method expression's function type
+ var params []*Var
+ sig := m.typ.(*Signature)
+ if sig.params != nil {
+ params = sig.params.vars
+ }
+ x.mode = value
+ x.typ = &Signature{
+ params: NewTuple(append([]*Var{NewVar(token.NoPos, check.pkg, "", x.typ)}, params...)...),
+ results: sig.results,
+ variadic: sig.variadic,
+ }
+
+ check.addDeclDep(m)
+
+ } else {
+ // regular selector
+ switch obj := obj.(type) {
+ case *Var:
+ check.recordSelection(e, FieldVal, x.typ, obj, index, indirect)
+ if x.mode == variable || indirect {
+ x.mode = variable
+ } else {
+ x.mode = value
+ }
+ x.typ = obj.typ
+
+ case *Func:
+ // TODO(gri) If we needed to take into account the receiver's
+ // addressability, should we report the type &(x.typ) instead?
+ check.recordSelection(e, MethodVal, x.typ, obj, index, indirect)
+
+ if debug {
+ // Verify that LookupFieldOrMethod and MethodSet.Lookup agree.
+ typ := x.typ
+ if x.mode == variable {
+ // If typ is not an (unnamed) pointer or an interface,
+ // use *typ instead, because the method set of *typ
+ // includes the methods of typ.
+ // Variables are addressable, so we can always take their
+ // address.
+ if _, ok := typ.(*Pointer); !ok && !isInterface(typ) {
+ typ = &Pointer{base: typ}
+ }
+ }
+ // If we created a synthetic pointer type above, we will throw
+ // away the method set computed here after use.
+ // TODO(gri) Method set computation should probably always compute
+ // both, the value and the pointer receiver method set and represent
+ // them in a single structure.
+ // TODO(gri) Consider also using a method set cache for the lifetime
+ // of checker once we rely on MethodSet lookup instead of individual
+ // lookup.
+ mset := NewMethodSet(typ)
+ if m := mset.Lookup(check.pkg, sel); m == nil || m.obj != obj {
+ check.dump("%s: (%s).%v -> %s", e.Pos(), typ, obj.name, m)
+ check.dump("%s\n", mset)
+ panic("method sets and lookup don't agree")
+ }
+ }
+
+ x.mode = value
+
+ // remove receiver
+ sig := *obj.typ.(*Signature)
+ sig.recv = nil
+ x.typ = &sig
+
+ check.addDeclDep(obj)
+
+ default:
+ unreachable()
+ }
+ }
+
+ // everything went well
+ x.expr = e
+ return
+
+Error:
+ x.mode = invalid
+ x.expr = e
+}
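+
+// For illustration, hypothetical selector forms covered by the cases above:
+//
+//  fmt.Println // qualified identifier (selection from a package name)
+//  s.field     // field selection
+//  s.Method    // method value
+//  T.Method    // method expression (x.mode == typexpr)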
diff --git a/llgo/third_party/go.tools/go/types/check.go b/llgo/third_party/go.tools/go/types/check.go
new file mode 100644
index 0000000000000000000000000000000000000000..40fa7ff166c209d1f4af1ec8a6637bc561d4bb71
--- /dev/null
+++ b/llgo/third_party/go.tools/go/types/check.go
@@ -0,0 +1,361 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements the Check function, which drives type-checking.
+
+package types
+
+import (
+ "go/ast"
+ "go/token"
+
+ "llvm.org/llgo/third_party/go.tools/go/exact"
+)
+
+// debugging/development support
+const (
+ debug = false // leave on during development
+ trace = false // turn on for detailed type resolution traces
+)
+
+// If Strict is set, the type-checker enforces additional
+// rules not specified by the Go 1 spec, but which will
+// catch guaranteed run-time errors if the respective
+// code is executed. In other words, programs passing in
+// Strict mode are Go 1 compliant, but not all Go 1 programs
+// will pass in Strict mode. The additional rules are:
+//
+// - A type assertion x.(T) where T is an interface type
+// is invalid if any (statically known) method that exists
+// for both x and T have different signatures.
+//
+const strict = false
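+
+// For illustration, a hypothetical assertion that strict mode (if enabled) would reject:
+//
+//  var r interface{ Read(p []byte) (n int, err error) }
+//  _ = r.(interface{ Read() string }) // can never succeed: Read has a different signature
+//
+// The Go 1 spec permits this assertion (it simply fails at run time).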
+
+// exprInfo stores information about an untyped expression.
+type exprInfo struct {
+ isLhs bool // expression is lhs operand of a shift with delayed type-check
+ mode operandMode
+ typ *Basic
+ val exact.Value // constant value; or nil (if not a constant)
+}
+
+// funcInfo stores the information required for type-checking a function.
+type funcInfo struct {
+ name string // for debugging/tracing only
+ decl *declInfo // for cycle detection
+ sig *Signature
+ body *ast.BlockStmt
+}
+
+// A context represents the context within which an object is type-checked.
+type context struct {
+ decl *declInfo // package-level declaration whose init expression/function body is checked
+ scope *Scope // top-most scope for lookups
+ iota exact.Value // value of iota in a constant declaration; nil otherwise
+ sig *Signature // function signature if inside a function; nil otherwise
+ hasLabel bool // set if a function makes use of labels (only ~1% of functions); unused outside functions
+ hasCallOrRecv bool // set if an expression contains a function call or channel receive operation
+}
+
+// A Checker maintains the state of the type checker.
+// It must be created with NewChecker.
+type Checker struct {
+ // package information
+ // (initialized by NewChecker, valid for the life-time of checker)
+ conf *Config
+ fset *token.FileSet
+ pkg *Package
+ *Info
+ objMap map[Object]*declInfo // maps package-level object to declaration info
+
+ // information collected during type-checking of a set of package files
+ // (initialized by Files, valid only for the duration of check.Files;
+ // maps and lists are allocated on demand)
+ files []*ast.File // package files
+ unusedDotImports map[*Scope]map[*Package]token.Pos // positions of unused dot-imported packages for each file scope
+
+ firstErr error // first error encountered
+ methods map[string][]*Func // maps type names to associated methods
+ untyped map[ast.Expr]exprInfo // map of expressions without final type
+ funcs []funcInfo // list of functions to type-check
+ delayed []func() // delayed checks requiring fully setup types
+
+ // context within which the current object is type-checked
+ // (valid only for the duration of type-checking a specific object)
+ context
+
+ // debugging
+ indent int // indentation for tracing
+}
+
+// addUnusedDotImport adds the position of a dot-imported package
+// pkg to the map of dot imports for the given file scope.
+func (check *Checker) addUnusedDotImport(scope *Scope, pkg *Package, pos token.Pos) {
+ mm := check.unusedDotImports
+ if mm == nil {
+ mm = make(map[*Scope]map[*Package]token.Pos)
+ check.unusedDotImports = mm
+ }
+ m := mm[scope]
+ if m == nil {
+ m = make(map[*Package]token.Pos)
+ mm[scope] = m
+ }
+ m[pkg] = pos
+}
+
+// addDeclDep adds the dependency edge (check.decl -> to) if check.decl exists
+func (check *Checker) addDeclDep(to Object) {
+ from := check.decl
+ if from == nil {
+ return // not in a package-level init expression
+ }
+ if _, found := check.objMap[to]; !found {
+ return // to is not a package-level object
+ }
+ from.addDep(to)
+}
+
+func (check *Checker) assocMethod(tname string, meth *Func) {
+ m := check.methods
+ if m == nil {
+ m = make(map[string][]*Func)
+ check.methods = m
+ }
+ m[tname] = append(m[tname], meth)
+}
+
+func (check *Checker) rememberUntyped(e ast.Expr, lhs bool, mode operandMode, typ *Basic, val exact.Value) {
+ m := check.untyped
+ if m == nil {
+ m = make(map[ast.Expr]exprInfo)
+ check.untyped = m
+ }
+ m[e] = exprInfo{lhs, mode, typ, val}
+}
+
+func (check *Checker) later(name string, decl *declInfo, sig *Signature, body *ast.BlockStmt) {
+ check.funcs = append(check.funcs, funcInfo{name, decl, sig, body})
+}
+
+func (check *Checker) delay(f func()) {
+ check.delayed = append(check.delayed, f)
+}
+
+// NewChecker returns a new Checker instance for a given package.
+// Package files may be added incrementally via checker.Files.
+func NewChecker(conf *Config, fset *token.FileSet, pkg *Package, info *Info) *Checker {
+ // make sure we have a configuration
+ if conf == nil {
+ conf = new(Config)
+ }
+
+ // make sure we have a package canonicalization map
+ if conf.Packages == nil {
+ conf.Packages = make(map[string]*Package)
+ }
+
+ // make sure we have an info struct
+ if info == nil {
+ info = new(Info)
+ }
+
+ return &Checker{
+ conf: conf,
+ fset: fset,
+ pkg: pkg,
+ Info: info,
+ objMap: make(map[Object]*declInfo),
+ }
+}
+
+// initFiles initializes the files-specific portion of checker.
+// The provided files must all belong to the same package.
+func (check *Checker) initFiles(files []*ast.File) {
+ // start with a clean slate (check.Files may be called multiple times)
+ check.files = nil
+ check.unusedDotImports = nil
+
+ check.firstErr = nil
+ check.methods = nil
+ check.untyped = nil
+ check.funcs = nil
+ check.delayed = nil
+
+ // determine package name and collect valid files
+ pkg := check.pkg
+ for _, file := range files {
+ switch name := file.Name.Name; pkg.name {
+ case "":
+ if name != "_" {
+ pkg.name = name
+ } else {
+ check.errorf(file.Name.Pos(), "invalid package name _")
+ }
+ fallthrough
+
+ case name:
+ check.files = append(check.files, file)
+
+ default:
+ check.errorf(file.Package, "package %s; expected %s", name, pkg.name)
+ // ignore this file
+ }
+ }
+}
+
+// A bailout panic is used for early termination.
+type bailout struct{}
+
+func (check *Checker) handleBailout(err *error) {
+ switch p := recover().(type) {
+ case nil, bailout:
+ // normal return or early exit
+ *err = check.firstErr
+ default:
+ // re-panic
+ panic(p)
+ }
+}
+
+// Files checks the provided files as part of the checker's package.
+func (check *Checker) Files(files []*ast.File) (err error) {
+ defer check.handleBailout(&err)
+
+ check.initFiles(files)
+
+ check.collectObjects()
+
+ check.packageObjects(check.resolveOrder())
+
+ check.functionBodies()
+
+ check.initOrder()
+
+ check.unusedImports()
+
+ // perform delayed checks
+ for _, f := range check.delayed {
+ f()
+ }
+
+ check.recordUntyped()
+
+ check.pkg.complete = true
+ return
+}
+
+func (check *Checker) recordUntyped() {
+ if !debug && check.Types == nil {
+ return // nothing to do
+ }
+
+ for x, info := range check.untyped {
+ if debug && isTyped(info.typ) {
+ check.dump("%s: %s (type %s) is typed", x.Pos(), x, info.typ)
+ unreachable()
+ }
+ check.recordTypeAndValue(x, info.mode, info.typ, info.val)
+ }
+}
+
+func (check *Checker) recordTypeAndValue(x ast.Expr, mode operandMode, typ Type, val exact.Value) {
+ assert(x != nil)
+ assert(typ != nil)
+ if mode == invalid {
+ return // omit
+ }
+ assert(typ != nil)
+ if mode == constant {
+ assert(val != nil)
+ assert(typ == Typ[Invalid] || isConstType(typ))
+ }
+ if m := check.Types; m != nil {
+ m[x] = TypeAndValue{mode, typ, val}
+ }
+}
+
+func (check *Checker) recordBuiltinType(f ast.Expr, sig *Signature) {
+ // f must be a (possibly parenthesized) identifier denoting a built-in
+ // (built-ins in package unsafe always produce a constant result and
+ // we don't record their signatures, so we don't see qualified idents
+ // here): record the signature for f and possible children.
+ for {
+ check.recordTypeAndValue(f, builtin, sig, nil)
+ switch p := f.(type) {
+ case *ast.Ident:
+ return // we're done
+ case *ast.ParenExpr:
+ f = p.X
+ default:
+ unreachable()
+ }
+ }
+}
+
+func (check *Checker) recordCommaOkTypes(x ast.Expr, a [2]Type) {
+ assert(x != nil)
+ if a[0] == nil || a[1] == nil {
+ return
+ }
+ assert(isTyped(a[0]) && isTyped(a[1]) && isBoolean(a[1]))
+ if m := check.Types; m != nil {
+ for {
+ tv := m[x]
+ assert(tv.Type != nil) // should have been recorded already
+ pos := x.Pos()
+ tv.Type = NewTuple(
+ NewVar(pos, check.pkg, "", a[0]),
+ NewVar(pos, check.pkg, "", a[1]),
+ )
+ m[x] = tv
+ // if x is a parenthesized expression (p.X), update p.X
+ p, _ := x.(*ast.ParenExpr)
+ if p == nil {
+ break
+ }
+ x = p.X
+ }
+ }
+}
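+
+// For example (illustrative): for the map index expression in the comma-ok
+// assignment
+//
+// v, ok := m[k]
+//
+// the recorded type of m[k] is rewritten to the pair (element type, bool)
+// so that clients see both results.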
+
+func (check *Checker) recordDef(id *ast.Ident, obj Object) {
+ assert(id != nil)
+ if m := check.Defs; m != nil {
+ m[id] = obj
+ }
+}
+
+func (check *Checker) recordUse(id *ast.Ident, obj Object) {
+ assert(id != nil)
+ assert(obj != nil)
+ if m := check.Uses; m != nil {
+ m[id] = obj
+ }
+}
+
+func (check *Checker) recordImplicit(node ast.Node, obj Object) {
+ assert(node != nil)
+ assert(obj != nil)
+ if m := check.Implicits; m != nil {
+ m[node] = obj
+ }
+}
+
+func (check *Checker) recordSelection(x *ast.SelectorExpr, kind SelectionKind, recv Type, obj Object, index []int, indirect bool) {
+ assert(obj != nil && (recv == nil || len(index) > 0))
+ check.recordUse(x.Sel, obj)
+ // TODO(gri) Should we also call recordTypeAndValue?
+ if m := check.Selections; m != nil {
+ m[x] = &Selection{kind, recv, obj, index, indirect}
+ }
+}
+
+func (check *Checker) recordScope(node ast.Node, scope *Scope) {
+ assert(node != nil)
+ assert(scope != nil)
+ if m := check.Scopes; m != nil {
+ m[node] = scope
+ }
+}
diff --git a/llgo/third_party/go.tools/go/types/check_test.go b/llgo/third_party/go.tools/go/types/check_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..8a9b873e8292353fe08f392d43c5d23b58785d56
--- /dev/null
+++ b/llgo/third_party/go.tools/go/types/check_test.go
@@ -0,0 +1,294 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements a typechecker test harness. The packages specified
+// in tests are typechecked. Error messages reported by the typechecker are
+// compared against the error messages expected in the test files.
+//
+// Expected errors are indicated in the test files by putting a comment
+// of the form /* ERROR "rx" */ immediately following an offending token.
+// The harness will verify that an error matching the regular expression
+// rx is reported at that source position. Consecutive comments may be
+// used to indicate multiple errors for the same token position.
+//
+// For instance, the following test file indicates that a "not declared"
+// error should be reported for the undeclared variable x:
+//
+// package p
+// func f() {
+// _ = x /* ERROR "not declared" */ + 1
+// }
+
+// TODO(gri) Also collect strict mode errors of the form /* STRICT ... */
+// and test against strict mode.
+
+package types_test
+
+import (
+ "flag"
+ "go/ast"
+ "go/parser"
+ "go/scanner"
+ "go/token"
+ "io/ioutil"
+ "regexp"
+ "strings"
+ "testing"
+
+ _ "llvm.org/llgo/third_party/go.tools/go/gcimporter"
+ . "llvm.org/llgo/third_party/go.tools/go/types"
+)
+
+var (
+ listErrors = flag.Bool("list", false, "list errors")
+ testFiles = flag.String("files", "", "space-separated list of test files")
+)
+
+// The test filenames do not end in .go so that they are invisible
+// to gofmt since they contain comments that must not change their
+// positions relative to surrounding tokens.
+
+// Each entry in tests is a list of files belonging to the same package.
+var tests = [][]string{
+ {"testdata/errors.src"},
+ {"testdata/importdecl0a.src", "testdata/importdecl0b.src"},
+ {"testdata/importdecl1a.src", "testdata/importdecl1b.src"},
+ {"testdata/cycles.src"},
+ {"testdata/cycles1.src"},
+ {"testdata/cycles2.src"},
+ {"testdata/cycles3.src"},
+ {"testdata/cycles4.src"},
+ {"testdata/init0.src"},
+ {"testdata/init1.src"},
+ {"testdata/init2.src"},
+ {"testdata/decls0.src"},
+ {"testdata/decls1.src"},
+ {"testdata/decls2a.src", "testdata/decls2b.src"},
+ {"testdata/decls3.src"},
+ {"testdata/const0.src"},
+ {"testdata/const1.src"},
+ {"testdata/constdecl.src"},
+ {"testdata/vardecl.src"},
+ {"testdata/expr0.src"},
+ {"testdata/expr1.src"},
+ {"testdata/expr2.src"},
+ {"testdata/expr3.src"},
+ {"testdata/methodsets.src"},
+ {"testdata/shifts.src"},
+ {"testdata/builtins.src"},
+ {"testdata/conversions.src"},
+ {"testdata/stmt0.src"},
+ {"testdata/stmt1.src"},
+ {"testdata/gotos.src"},
+ {"testdata/labels.src"},
+ {"testdata/issues.src"},
+ {"testdata/blank.src"},
+}
+
+var fset = token.NewFileSet()
+
+// Positioned errors are of the form filename:line:column: message .
+var posMsgRx = regexp.MustCompile(`^(.*:[0-9]+:[0-9]+): *(.*)`)
+
+// splitError splits an error's error message into a position string
+// and the actual error message. If there's no position information,
+// pos is the empty string, and msg is the entire error message.
+//
+func splitError(err error) (pos, msg string) {
+ msg = err.Error()
+ if m := posMsgRx.FindStringSubmatch(msg); len(m) == 3 {
+ pos = m[1]
+ msg = m[2]
+ }
+ return
+}
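+
+// For example (illustrative input only):
+//
+// pos, msg := splitError(errors.New("foo.go:3:14: undeclared name: x"))
+// // pos == "foo.go:3:14", msg == "undeclared name: x"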
+
+func parseFiles(t *testing.T, filenames []string) ([]*ast.File, []error) {
+ var files []*ast.File
+ var errlist []error
+ for _, filename := range filenames {
+ file, err := parser.ParseFile(fset, filename, nil, parser.AllErrors)
+ if file == nil {
+ t.Fatalf("%s: %s", filename, err)
+ }
+ files = append(files, file)
+ if err != nil {
+ if list, _ := err.(scanner.ErrorList); len(list) > 0 {
+ for _, err := range list {
+ errlist = append(errlist, err)
+ }
+ } else {
+ errlist = append(errlist, err)
+ }
+ }
+ }
+ return files, errlist
+}
+
+// ERROR comments must start with text `ERROR "rx"` or `ERROR rx` where
+// rx is a regular expression that matches the expected error message.
+// Space around "rx" or rx is ignored. Use the form `ERROR HERE "rx"`
+// for error messages that are located immediately after rather than
+// at a token's position.
+//
+var errRx = regexp.MustCompile(`^ *ERROR *(HERE)? *"?([^"]*)"?`)
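+
+// For example (illustrative only; the message patterns are hypothetical):
+// the first form expects the error at the position of x, the second
+// immediately after the preceding token:
+//
+// _ = x /* ERROR "not declared" */ + 1
+// f() /* ERROR HERE "not enough arguments" */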
+
+// errMap collects the regular expressions of ERROR comments found
+// in files and returns them as a map of error positions to error messages.
+//
+func errMap(t *testing.T, testname string, files []*ast.File) map[string][]string {
+ // map of position strings to lists of error message patterns
+ errmap := make(map[string][]string)
+
+ for _, file := range files {
+ filename := fset.Position(file.Package).Filename
+ src, err := ioutil.ReadFile(filename)
+ if err != nil {
+ t.Fatalf("%s: could not read %s", testname, filename)
+ }
+
+ var s scanner.Scanner
+ s.Init(fset.AddFile(filename, -1, len(src)), src, nil, scanner.ScanComments)
+ var prev token.Pos // position of last non-comment, non-semicolon token
+ var here token.Pos // position immediately after the token at position prev
+
+ scanFile:
+ for {
+ pos, tok, lit := s.Scan()
+ switch tok {
+ case token.EOF:
+ break scanFile
+ case token.COMMENT:
+ if lit[1] == '*' {
+ lit = lit[:len(lit)-2] // strip trailing */
+ }
+ if s := errRx.FindStringSubmatch(lit[2:]); len(s) == 3 {
+ pos := prev
+ if s[1] == "HERE" {
+ pos = here
+ }
+ p := fset.Position(pos).String()
+ errmap[p] = append(errmap[p], strings.TrimSpace(s[2]))
+ }
+ case token.SEMICOLON:
+ // ignore automatically inserted semicolon
+ if lit == "\n" {
+ continue scanFile
+ }
+ fallthrough
+ default:
+ prev = pos
+ var l int // token length
+ if tok.IsLiteral() {
+ l = len(lit)
+ } else {
+ l = len(tok.String())
+ }
+ here = prev + token.Pos(l)
+ }
+ }
+ }
+
+ return errmap
+}
+
+func eliminate(t *testing.T, errmap map[string][]string, errlist []error) {
+ for _, err := range errlist {
+ pos, gotMsg := splitError(err)
+ list := errmap[pos]
+ index := -1 // list index of matching message, if any
+ // we expect one of the messages in list to match the error at pos
+ for i, wantRx := range list {
+ rx, err := regexp.Compile(wantRx)
+ if err != nil {
+ t.Errorf("%s: %v", pos, err)
+ continue
+ }
+ if rx.MatchString(gotMsg) {
+ index = i
+ break
+ }
+ }
+ if index >= 0 {
+ // eliminate from list
+ if n := len(list) - 1; n > 0 {
+ // not the last entry - swap in last element and shorten list by 1
+ list[index] = list[n]
+ errmap[pos] = list[:n]
+ } else {
+ // last entry - remove list from map
+ delete(errmap, pos)
+ }
+ } else {
+ t.Errorf("%s: no error expected: %q", pos, gotMsg)
+ }
+ }
+}
+
+func checkFiles(t *testing.T, testfiles []string) {
+ // parse files and collect parser errors
+ files, errlist := parseFiles(t, testfiles)
+
+ pkgName := ""
+ if len(files) > 0 {
+ pkgName = files[0].Name.Name
+ }
+
+ if *listErrors && len(errlist) > 0 {
+ t.Errorf("--- %s:", pkgName)
+ for _, err := range errlist {
+ t.Error(err)
+ }
+ }
+
+ // typecheck and collect typechecker errors
+ var conf Config
+ conf.Error = func(err error) {
+ if *listErrors {
+ t.Error(err)
+ return
+ }
+ // Ignore secondary error messages starting with "\t";
+ // they are clarifying messages for a primary error.
+ if !strings.Contains(err.Error(), ": \t") {
+ errlist = append(errlist, err)
+ }
+ }
+ conf.Check(pkgName, fset, files, nil)
+
+ if *listErrors {
+ return
+ }
+
+ // match and eliminate errors;
+ // we are expecting the following errors
+ errmap := errMap(t, pkgName, files)
+ eliminate(t, errmap, errlist)
+
+ // there should be no expected errors left
+ if len(errmap) > 0 {
+ t.Errorf("--- %s: %d source positions with expected (but not reported) errors:", pkgName, len(errmap))
+ for pos, list := range errmap {
+ for _, rx := range list {
+ t.Errorf("%s: %q", pos, rx)
+ }
+ }
+ }
+}
+
+func TestCheck(t *testing.T) {
+ // Declare builtins for testing.
+ DefPredeclaredTestFuncs()
+
+ // If explicit test files are specified, only check those.
+ if files := *testFiles; files != "" {
+ checkFiles(t, strings.Split(files, " "))
+ return
+ }
+
+ // Otherwise, run all the tests.
+ for _, files := range tests {
+ checkFiles(t, files)
+ }
+}
diff --git a/llgo/third_party/go.tools/go/types/conversions.go b/llgo/third_party/go.tools/go/types/conversions.go
new file mode 100644
index 0000000000000000000000000000000000000000..ea308be5a190882dd1a8217d0e1efe2a81069bb8
--- /dev/null
+++ b/llgo/third_party/go.tools/go/types/conversions.go
@@ -0,0 +1,146 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements typechecking of conversions.
+
+package types
+
+import "llvm.org/llgo/third_party/go.tools/go/exact"
+
+// Conversion type-checks the conversion T(x).
+// The result is in x.
+func (check *Checker) conversion(x *operand, T Type) {
+ constArg := x.mode == constant
+
+ var ok bool
+ switch {
+ case constArg && isConstType(T):
+ // constant conversion
+ switch t := T.Underlying().(*Basic); {
+ case representableConst(x.val, check.conf, t.kind, &x.val):
+ ok = true
+ case x.isInteger() && isString(t):
+ codepoint := int64(-1)
+ if i, ok := exact.Int64Val(x.val); ok {
+ codepoint = i
+ }
+ // If codepoint < 0 the absolute value is too large (or unknown) for
+ // conversion. This is the same as converting any other out-of-range
+ // value - let string(codepoint) do the work.
+ x.val = exact.MakeString(string(codepoint))
+ ok = true
+ }
+ case x.convertibleTo(check.conf, T):
+ // non-constant conversion
+ x.mode = value
+ ok = true
+ }
+
+ if !ok {
+ check.errorf(x.pos(), "cannot convert %s to %s", x, T)
+ x.mode = invalid
+ return
+ }
+
+ // The conversion argument types are final. For untyped values the
+ // conversion provides the type, per the spec: "A constant may be
+ // given a type explicitly by a constant declaration or conversion,...".
+ final := x.typ
+ if isUntyped(x.typ) {
+ final = T
+ // - For conversions to interfaces, use the argument's default type.
+ // - For conversions of untyped constants to non-constant types, also
+ // use the default type (e.g., []byte("foo") should report string
+ // not []byte as type for the constant "foo").
+ // - Keep untyped nil for untyped nil arguments.
+ if isInterface(T) || constArg && !isConstType(T) {
+ final = defaultType(x.typ)
+ }
+ check.updateExprType(x.expr, final, true)
+ }
+
+ x.typ = T
+}
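+
+// For example (illustrative): in the constant conversion
+//
+// const c = string(65) // c == "A", via the codepoint path above
+//
+// the result is again a constant, while int8(300) fails representableConst
+// and is reported as not convertible.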
+
+func (x *operand) convertibleTo(conf *Config, T Type) bool {
+ // "x is assignable to T"
+ if x.assignableTo(conf, T) {
+ return true
+ }
+
+ // "x's type and T have identical underlying types"
+ V := x.typ
+ Vu := V.Underlying()
+ Tu := T.Underlying()
+ if Identical(Vu, Tu) {
+ return true
+ }
+
+ // "x's type and T are unnamed pointer types and their pointer base types have identical underlying types"
+ if V, ok := V.(*Pointer); ok {
+ if T, ok := T.(*Pointer); ok {
+ if Identical(V.base.Underlying(), T.base.Underlying()) {
+ return true
+ }
+ }
+ }
+
+ // "x's type and T are both integer or floating point types"
+ if (isInteger(V) || isFloat(V)) && (isInteger(T) || isFloat(T)) {
+ return true
+ }
+
+ // "x's type and T are both complex types"
+ if isComplex(V) && isComplex(T) {
+ return true
+ }
+
+ // "x is an integer or a slice of bytes or runes and T is a string type"
+ if (isInteger(V) || isBytesOrRunes(Vu)) && isString(T) {
+ return true
+ }
+
+ // "x is a string and T is a slice of bytes or runes"
+ if isString(V) && isBytesOrRunes(Tu) {
+ return true
+ }
+
+ // package unsafe:
+ // "any pointer or value of underlying type uintptr can be converted into a unsafe.Pointer"
+ if (isPointer(Vu) || isUintptr(Vu)) && isUnsafePointer(T) {
+ return true
+ }
+ // "and vice versa"
+ if isUnsafePointer(V) && (isPointer(Tu) || isUintptr(Tu)) {
+ return true
+ }
+
+ return false
+}
+
+func isUintptr(typ Type) bool {
+ t, ok := typ.Underlying().(*Basic)
+ return ok && t.kind == Uintptr
+}
+
+func isUnsafePointer(typ Type) bool {
+ // TODO(gri): Is this (typ.Underlying() instead of just typ) correct?
+ // The spec does not say so, but gc claims it is. See also
+ // issue 6326.
+ t, ok := typ.Underlying().(*Basic)
+ return ok && t.kind == UnsafePointer
+}
+
+func isPointer(typ Type) bool {
+ _, ok := typ.Underlying().(*Pointer)
+ return ok
+}
+
+func isBytesOrRunes(typ Type) bool {
+ if s, ok := typ.(*Slice); ok {
+ t, ok := s.elem.Underlying().(*Basic)
+ return ok && (t.kind == Byte || t.kind == Rune)
+ }
+ return false
+}
diff --git a/llgo/third_party/go.tools/go/types/decl.go b/llgo/third_party/go.tools/go/types/decl.go
new file mode 100644
index 0000000000000000000000000000000000000000..68d6f0e6cc8c219f7c61701e2cd53e41313b772b
--- /dev/null
+++ b/llgo/third_party/go.tools/go/types/decl.go
@@ -0,0 +1,419 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+import (
+ "go/ast"
+ "go/token"
+
+ "llvm.org/llgo/third_party/go.tools/go/exact"
+)
+
+func (check *Checker) reportAltDecl(obj Object) {
+ if pos := obj.Pos(); pos.IsValid() {
+ // We use "other" rather than "previous" here because
+ // the first declaration seen may not be textually
+ // earlier in the source.
+ check.errorf(pos, "\tother declaration of %s", obj.Name()) // secondary error, \t indented
+ }
+}
+
+func (check *Checker) declare(scope *Scope, id *ast.Ident, obj Object) {
+ // spec: "The blank identifier, represented by the underscore
+ // character _, may be used in a declaration like any other
+ // identifier but the declaration does not introduce a new
+ // binding."
+ if obj.Name() != "_" {
+ if alt := scope.Insert(obj); alt != nil {
+ check.errorf(obj.Pos(), "%s redeclared in this block", obj.Name())
+ check.reportAltDecl(alt)
+ return
+ }
+ }
+ if id != nil {
+ check.recordDef(id, obj)
+ }
+}
+
+// objDecl type-checks the declaration of obj in its respective (file) context.
+// See check.typ for the details on def and path.
+func (check *Checker) objDecl(obj Object, def *Named, path []*TypeName) {
+ if obj.Type() != nil {
+ return // already checked - nothing to do
+ }
+
+ if trace {
+ check.trace(obj.Pos(), "-- declaring %s", obj.Name())
+ check.indent++
+ defer func() {
+ check.indent--
+ check.trace(obj.Pos(), "=> %s", obj)
+ }()
+ }
+
+ d := check.objMap[obj]
+ if d == nil {
+ check.dump("%s: %s should have been declared", obj.Pos(), obj.Name())
+ unreachable()
+ }
+
+ // save/restore current context and setup object context
+ defer func(ctxt context) {
+ check.context = ctxt
+ }(check.context)
+ check.context = context{
+ scope: d.file,
+ }
+
+ // Const and var declarations must not have initialization
+ // cycles. We track them by remembering the current declaration
+ // in check.decl. Initialization expressions depending on other
+ // consts, vars, or functions, add dependencies to the current
+ // check.decl.
+ switch obj := obj.(type) {
+ case *Const:
+ check.decl = d // new package-level const decl
+ check.constDecl(obj, d.typ, d.init)
+ case *Var:
+ check.decl = d // new package-level var decl
+ check.varDecl(obj, d.lhs, d.typ, d.init)
+ case *TypeName:
+ // invalid recursive types are detected via path
+ check.typeDecl(obj, d.typ, def, path)
+ case *Func:
+ // functions may be recursive - no need to track dependencies
+ check.funcDecl(obj, d)
+ default:
+ unreachable()
+ }
+}
+
+func (check *Checker) constDecl(obj *Const, typ, init ast.Expr) {
+ assert(obj.typ == nil)
+
+ if obj.visited {
+ obj.typ = Typ[Invalid]
+ return
+ }
+ obj.visited = true
+
+ // use the correct value of iota
+ assert(check.iota == nil)
+ check.iota = obj.val
+ defer func() { check.iota = nil }()
+
+ // provide valid constant value under all circumstances
+ obj.val = exact.MakeUnknown()
+
+ // determine type, if any
+ if typ != nil {
+ t := check.typ(typ)
+ if !isConstType(t) {
+ check.errorf(typ.Pos(), "invalid constant type %s", t)
+ obj.typ = Typ[Invalid]
+ return
+ }
+ obj.typ = t
+ }
+
+ // check initialization
+ var x operand
+ if init != nil {
+ check.expr(&x, init)
+ }
+ check.initConst(obj, &x)
+}
+
+func (check *Checker) varDecl(obj *Var, lhs []*Var, typ, init ast.Expr) {
+ assert(obj.typ == nil)
+
+ if obj.visited {
+ obj.typ = Typ[Invalid]
+ return
+ }
+ obj.visited = true
+
+ // var declarations cannot use iota
+ assert(check.iota == nil)
+
+ // determine type, if any
+ if typ != nil {
+ obj.typ = check.typ(typ)
+ }
+
+ // check initialization
+ if init == nil {
+ if typ == nil {
+ // error reported before by arityMatch
+ obj.typ = Typ[Invalid]
+ }
+ return
+ }
+
+ if lhs == nil || len(lhs) == 1 {
+ assert(lhs == nil || lhs[0] == obj)
+ var x operand
+ check.expr(&x, init)
+ check.initVar(obj, &x, false)
+ return
+ }
+
+ if debug {
+ // obj must be one of lhs
+ found := false
+ for _, lhs := range lhs {
+ if obj == lhs {
+ found = true
+ break
+ }
+ }
+ if !found {
+ panic("inconsistent lhs")
+ }
+ }
+ check.initVars(lhs, []ast.Expr{init}, token.NoPos)
+}
+
+// underlying returns the underlying type of typ; possibly by following
+// forward chains of named types. Such chains only exist while named types
+// are incomplete.
+func underlying(typ Type) Type {
+ for {
+ n, _ := typ.(*Named)
+ if n == nil {
+ break
+ }
+ typ = n.underlying
+ }
+ return typ
+}
+
+func (n *Named) setUnderlying(typ Type) {
+ if n != nil {
+ n.underlying = typ
+ }
+}
+
+func (check *Checker) typeDecl(obj *TypeName, typ ast.Expr, def *Named, path []*TypeName) {
+ assert(obj.typ == nil)
+
+ // type declarations cannot use iota
+ assert(check.iota == nil)
+
+ named := &Named{obj: obj}
+ def.setUnderlying(named)
+ obj.typ = named // make sure recursive type declarations terminate
+
+ // determine underlying type of named
+ check.typExpr(typ, named, append(path, obj))
+
+ // The underlying type of named may be itself a named type that is
+ // incomplete:
+ //
+ // type (
+ // A B
+ // B *C
+ // C A
+ // )
+ //
+ // The type of C is the (named) type of A which is incomplete,
+ // and which has as its underlying type the named type B.
+ // Determine the (final, unnamed) underlying type by resolving
+ // any forward chain (they always end in an unnamed type).
+ named.underlying = underlying(named.underlying)
+
+ // check and add associated methods
+ // TODO(gri) It's easy to create pathological cases where the
+ // current approach is incorrect: In general we need to know
+ // and add all methods _before_ type-checking the type.
+ // See http://play.golang.org/p/WMpE0q2wK8
+ check.addMethodDecls(obj)
+}
+
+func (check *Checker) addMethodDecls(obj *TypeName) {
+ // get associated methods
+ methods := check.methods[obj.name]
+ if len(methods) == 0 {
+ return // no methods
+ }
+ delete(check.methods, obj.name)
+
+ // use an objset to check for name conflicts
+ var mset objset
+
+ // spec: "If the base type is a struct type, the non-blank method
+ // and field names must be distinct."
+ base := obj.typ.(*Named)
+ if t, _ := base.underlying.(*Struct); t != nil {
+ for _, fld := range t.fields {
+ if fld.name != "_" {
+ assert(mset.insert(fld) == nil)
+ }
+ }
+ }
+
+ // Checker.Files may be called multiple times; additional package files
+ // may add methods to already type-checked types. Add pre-existing methods
+ // so that we can detect redeclarations.
+ for _, m := range base.methods {
+ assert(m.name != "_")
+ assert(mset.insert(m) == nil)
+ }
+
+ // type-check methods
+ for _, m := range methods {
+ // spec: "For a base type, the non-blank names of methods bound
+ // to it must be unique."
+ if m.name != "_" {
+ if alt := mset.insert(m); alt != nil {
+ switch alt.(type) {
+ case *Var:
+ check.errorf(m.pos, "field and method with the same name %s", m.name)
+ case *Func:
+ check.errorf(m.pos, "method %s already declared for %s", m.name, base)
+ default:
+ unreachable()
+ }
+ check.reportAltDecl(alt)
+ continue
+ }
+ }
+ check.objDecl(m, nil, nil)
+ // methods with blank _ names cannot be found - don't keep them
+ if m.name != "_" {
+ base.methods = append(base.methods, m)
+ }
+ }
+}
+
+func (check *Checker) funcDecl(obj *Func, decl *declInfo) {
+ assert(obj.typ == nil)
+
+ // func declarations cannot use iota
+ assert(check.iota == nil)
+
+ sig := new(Signature)
+ obj.typ = sig // guard against cycles
+ fdecl := decl.fdecl
+ check.funcType(sig, fdecl.Recv, fdecl.Type)
+ if sig.recv == nil && obj.name == "init" && (sig.params.Len() > 0 || sig.results.Len() > 0) {
+ check.errorf(fdecl.Pos(), "func init must have no arguments and no return values")
+ // ok to continue
+ }
+
+ // function body must be type-checked after global declarations
+ // (functions implemented elsewhere have no body)
+ if !check.conf.IgnoreFuncBodies && fdecl.Body != nil {
+ check.later(obj.name, decl, sig, fdecl.Body)
+ }
+}
+
+func (check *Checker) declStmt(decl ast.Decl) {
+ pkg := check.pkg
+
+ switch d := decl.(type) {
+ case *ast.BadDecl:
+ // ignore
+
+ case *ast.GenDecl:
+ var last *ast.ValueSpec // last ValueSpec with type or init exprs seen
+ for iota, spec := range d.Specs {
+ switch s := spec.(type) {
+ case *ast.ValueSpec:
+ switch d.Tok {
+ case token.CONST:
+ // determine which init exprs to use
+ switch {
+ case s.Type != nil || len(s.Values) > 0:
+ last = s
+ case last == nil:
+ last = new(ast.ValueSpec) // make sure last exists
+ }
+
+ // declare all constants
+ lhs := make([]*Const, len(s.Names))
+ for i, name := range s.Names {
+ obj := NewConst(name.Pos(), pkg, name.Name, nil, exact.MakeInt64(int64(iota)))
+ lhs[i] = obj
+
+ var init ast.Expr
+ if i < len(last.Values) {
+ init = last.Values[i]
+ }
+
+ check.constDecl(obj, last.Type, init)
+ }
+
+ check.arityMatch(s, last)
+
+ for i, name := range s.Names {
+ check.declare(check.scope, name, lhs[i])
+ }
+
+ case token.VAR:
+ lhs0 := make([]*Var, len(s.Names))
+ for i, name := range s.Names {
+ lhs0[i] = NewVar(name.Pos(), pkg, name.Name, nil)
+ }
+
+ // initialize all variables
+ for i, obj := range lhs0 {
+ var lhs []*Var
+ var init ast.Expr
+ switch len(s.Values) {
+ case len(s.Names):
+ // lhs and rhs match
+ init = s.Values[i]
+ case 1:
+ // rhs is expected to be a multi-valued expression
+ lhs = lhs0
+ init = s.Values[0]
+ default:
+ if i < len(s.Values) {
+ init = s.Values[i]
+ }
+ }
+ check.varDecl(obj, lhs, s.Type, init)
+ if len(s.Values) == 1 {
+ // If we have a single lhs variable we are done either way.
+ // If we have a single rhs expression, it must be a multi-
+ // valued expression, in which case handling the first lhs
+ // variable will cause all lhs variables to have a type
+ // assigned, and we are done as well.
+ if debug {
+ for _, obj := range lhs0 {
+ assert(obj.typ != nil)
+ }
+ }
+ break
+ }
+ }
+
+ check.arityMatch(s, nil)
+
+ // declare all variables
+ // (only at this point are the variable scopes (parents) set)
+ for i, name := range s.Names {
+ check.declare(check.scope, name, lhs0[i])
+ }
+
+ default:
+ check.invalidAST(s.Pos(), "invalid token %s", d.Tok)
+ }
+
+ case *ast.TypeSpec:
+ obj := NewTypeName(s.Name.Pos(), pkg, s.Name.Name, nil)
+ check.declare(check.scope, s.Name, obj)
+ check.typeDecl(obj, s.Type, nil, nil)
+
+ default:
+ check.invalidAST(s.Pos(), "const, type, or var declaration expected")
+ }
+ }
+
+ default:
+ check.invalidAST(d.Pos(), "unknown ast.Decl node %T", d)
+ }
+}
diff --git a/llgo/third_party/go.tools/go/types/errors.go b/llgo/third_party/go.tools/go/types/errors.go
new file mode 100644
index 0000000000000000000000000000000000000000..0a9dd0e19ba141d68a61918bfbcd0cf0112f7650
--- /dev/null
+++ b/llgo/third_party/go.tools/go/types/errors.go
@@ -0,0 +1,96 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements various error reporters.
+
+package types
+
+import (
+ "fmt"
+ "go/ast"
+ "go/token"
+ "strings"
+)
+
+func assert(p bool) {
+ if !p {
+ panic("assertion failed")
+ }
+}
+
+func unreachable() {
+ panic("unreachable")
+}
+
+func (check *Checker) sprintf(format string, args ...interface{}) string {
+ for i, arg := range args {
+ switch a := arg.(type) {
+ case nil:
+ arg = ""
+ case operand:
+ panic("internal error: should always pass *operand")
+ case *operand:
+ arg = operandString(check.pkg, a)
+ case token.Pos:
+ arg = check.fset.Position(a).String()
+ case ast.Expr:
+ arg = ExprString(a)
+ case Object:
+ arg = ObjectString(check.pkg, a)
+ case Type:
+ arg = TypeString(check.pkg, a)
+ }
+ args[i] = arg
+ }
+ return fmt.Sprintf(format, args...)
+}
+
+func (check *Checker) trace(pos token.Pos, format string, args ...interface{}) {
+ fmt.Printf("%s:\t%s%s\n",
+ check.fset.Position(pos),
+ strings.Repeat(". ", check.indent),
+ check.sprintf(format, args...),
+ )
+}
+
+// dump is only needed for debugging
+func (check *Checker) dump(format string, args ...interface{}) {
+ fmt.Println(check.sprintf(format, args...))
+}
+
+func (check *Checker) err(pos token.Pos, msg string, soft bool) {
+ err := Error{check.fset, pos, msg, soft}
+ if check.firstErr == nil {
+ check.firstErr = err
+ }
+ f := check.conf.Error
+ if f == nil {
+ panic(bailout{}) // report only first error
+ }
+ f(err)
+}
+
+func (check *Checker) error(pos token.Pos, msg string) {
+ check.err(pos, msg, false)
+}
+
+func (check *Checker) errorf(pos token.Pos, format string, args ...interface{}) {
+ check.err(pos, check.sprintf(format, args...), false)
+}
+
+func (check *Checker) softErrorf(pos token.Pos, format string, args ...interface{}) {
+ check.err(pos, check.sprintf(format, args...), true)
+}
+
+func (check *Checker) invalidAST(pos token.Pos, format string, args ...interface{}) {
+ check.errorf(pos, "invalid AST: "+format, args...)
+}
+
+func (check *Checker) invalidArg(pos token.Pos, format string, args ...interface{}) {
+ check.errorf(pos, "invalid argument: "+format, args...)
+}
+
+func (check *Checker) invalidOp(pos token.Pos, format string, args ...interface{}) {
+ check.errorf(pos, "invalid operation: "+format, args...)
+}
diff --git a/llgo/third_party/go.tools/go/types/eval.go b/llgo/third_party/go.tools/go/types/eval.go
new file mode 100644
index 0000000000000000000000000000000000000000..8ad7e4699107c0a72807003062edb36adcb3bda9
--- /dev/null
+++ b/llgo/third_party/go.tools/go/types/eval.go
@@ -0,0 +1,109 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements New, Eval and EvalNode.
+
+package types
+
+import (
+ "fmt"
+ "go/ast"
+ "go/parser"
+ "go/token"
+
+ "llvm.org/llgo/third_party/go.tools/go/exact"
+)
+
+// New is a convenience function to create a new type from a given
+// expression or type literal string evaluated in Universe scope.
+// New(str) is shorthand for Eval(str, nil, nil), but only returns
+// the type result, and panics in case of an error.
+// Position info for objects in the result type is undefined.
+//
+func New(str string) Type {
+ typ, _, err := Eval(str, nil, nil)
+ if err != nil {
+ panic(err)
+ }
+ return typ
+}
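+
+// For example (illustrative):
+//
+// m := New("map[string][]int") // a Type describing map[string][]int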
+
+// Eval returns the type and, if constant, the value for the
+// expression or type literal string str evaluated in scope.
+// If the expression contains function literals, the function
+// bodies are ignored (though they must be syntactically correct).
+//
+// If pkg == nil, the Universe scope is used and the provided
+// scope is ignored. Otherwise, the scope must belong to the
+// package (either the package scope, or nested within the
+// package scope).
+//
+// An error is returned if the scope is incorrect, the string
+// has syntax errors, or if it cannot be evaluated in the scope.
+// Position info for objects in the result type is undefined.
+//
+// Note: Eval should not be used instead of running Check to compute
+// types and values, but in addition to Check. Eval will re-evaluate
+// its argument each time, and it also does not know about the context
+// in which an expression is used (e.g., an assignment). Thus, top-
+// level untyped constants will return an untyped type rather than the
+// respective context-specific type.
+//
+func Eval(str string, pkg *Package, scope *Scope) (typ Type, val exact.Value, err error) {
+ node, err := parser.ParseExpr(str)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // Create a file set that looks structurally identical to the
+ // one created by parser.ParseExpr for correct error positions.
+ fset := token.NewFileSet()
+ fset.AddFile("", len(str), fset.Base()).SetLinesForContent([]byte(str))
+
+ return EvalNode(fset, node, pkg, scope)
+}
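+
+// For example (illustrative use in Universe scope):
+//
+// typ, val, err := Eval("1 << 10", nil, nil)
+// // on success, typ is untyped int and val is the constant 1024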
+
+// EvalNode is like Eval but instead of a string it accepts
+// an expression node and its respective file set.
+//
+// An error is returned if the scope is incorrect or
+// if the node cannot be evaluated in the scope.
+//
+func EvalNode(fset *token.FileSet, node ast.Expr, pkg *Package, scope *Scope) (typ Type, val exact.Value, err error) {
+ // verify package/scope relationship
+ if pkg == nil {
+ scope = Universe
+ } else {
+ s := scope
+ for s != nil && s != pkg.scope {
+ s = s.parent
+ }
+ // s == nil || s == pkg.scope
+ if s == nil {
+ return nil, nil, fmt.Errorf("scope does not belong to package %s", pkg.name)
+ }
+ }
+
+ // initialize checker
+ check := NewChecker(nil, fset, pkg, nil)
+ check.scope = scope
+ defer check.handleBailout(&err)
+
+ // evaluate node
+ var x operand
+ check.exprOrType(&x, node)
+ switch x.mode {
+ case invalid, novalue:
+ fallthrough
+ default:
+ unreachable() // or bailed out with error
+ case constant:
+ val = x.val
+ fallthrough
+ case typexpr, variable, mapindex, value, commaok:
+ typ = x.typ
+ }
+
+ return
+}
diff --git a/llgo/third_party/go.tools/go/types/eval_test.go b/llgo/third_party/go.tools/go/types/eval_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..f9cf592a1dabf969f668b85e39527df15a3f6f9c
--- /dev/null
+++ b/llgo/third_party/go.tools/go/types/eval_test.go
@@ -0,0 +1,148 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains tests for Eval.
+
+package types_test
+
+import (
+ "go/ast"
+ "go/parser"
+ "go/token"
+ "strings"
+ "testing"
+
+ _ "llvm.org/llgo/third_party/go.tools/go/gcimporter"
+ . "llvm.org/llgo/third_party/go.tools/go/types"
+)
+
+func testEval(t *testing.T, pkg *Package, scope *Scope, str string, typ Type, typStr, valStr string) {
+ gotTyp, gotVal, err := Eval(str, pkg, scope)
+ if err != nil {
+ t.Errorf("Eval(%q) failed: %s", str, err)
+ return
+ }
+ if gotTyp == nil {
+ t.Errorf("Eval(%q) got nil type but no error", str)
+ return
+ }
+
+ // compare types
+ if typ != nil {
+ // we have a type, check identity
+ if !Identical(gotTyp, typ) {
+ t.Errorf("Eval(%q) got type %s, want %s", str, gotTyp, typ)
+ return
+ }
+ } else {
+ // we have a string, compare type string
+ gotStr := gotTyp.String()
+ if gotStr != typStr {
+ t.Errorf("Eval(%q) got type %s, want %s", str, gotStr, typStr)
+ return
+ }
+ }
+
+ // compare values
+ gotStr := ""
+ if gotVal != nil {
+ gotStr = gotVal.String()
+ }
+ if gotStr != valStr {
+ t.Errorf("Eval(%q) got value %s, want %s", str, gotStr, valStr)
+ }
+}
+
+func TestEvalBasic(t *testing.T) {
+ for _, typ := range Typ[Bool : String+1] {
+ testEval(t, nil, nil, typ.Name(), typ, "", "")
+ }
+}
+
+func TestEvalComposite(t *testing.T) {
+ for _, test := range independentTestTypes {
+ testEval(t, nil, nil, test.src, nil, test.str, "")
+ }
+}
+
+func TestEvalArith(t *testing.T) {
+ var tests = []string{
+ `true`,
+ `false == false`,
+ `12345678 + 87654321 == 99999999`,
+ `10 * 20 == 200`,
+ `(1<<1000)*2 >> 100 == 2<<900`,
+ `"foo" + "bar" == "foobar"`,
+ `"abc" <= "bcd"`,
+ `len([10]struct{}{}) == 2*5`,
+ }
+ for _, test := range tests {
+ testEval(t, nil, nil, test, Typ[UntypedBool], "", "true")
+ }
+}
+
+func TestEvalContext(t *testing.T) {
+ src := `
+package p
+import "fmt"
+import m "math"
+const c = 3.0
+type T []int
+func f(a int, s string) float64 {
+ fmt.Println("calling f")
+ _ = m.Pi // use package math
+ const d int = c + 1
+ var x int
+ x = a + len(s)
+ return float64(x)
+}
+`
+ fset := token.NewFileSet()
+ file, err := parser.ParseFile(fset, "p", src, 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ pkg, err := Check("p", fset, []*ast.File{file})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ pkgScope := pkg.Scope()
+ if n := pkgScope.NumChildren(); n != 1 {
+ t.Fatalf("got %d file scopes, want 1", n)
+ }
+
+ fileScope := pkgScope.Child(0)
+ if n := fileScope.NumChildren(); n != 1 {
+ t.Fatalf("got %d functions scopes, want 1", n)
+ }
+
+ funcScope := fileScope.Child(0)
+
+ var tests = []string{
+ `true => true, untyped bool`,
+ `fmt.Println => , func(a ...interface{}) (n int, err error)`,
+ `c => 3, untyped float`,
+ `T => , p.T`,
+ `a => , int`,
+ `s => , string`,
+ `d => 4, int`,
+ `x => , int`,
+ `d/c => 1, int`,
+ `c/2 => 3/2, untyped float`,
+ `m.Pi < m.E => false, untyped bool`,
+ }
+ for _, test := range tests {
+ str, typ := split(test, ", ")
+ str, val := split(str, "=>")
+ testEval(t, pkg, funcScope, str, nil, typ, val)
+ }
+}
+
+// split splits string s at the first occurrence of sep.
+func split(s, sep string) (string, string) {
+ i := strings.Index(s, sep)
+ return strings.TrimSpace(s[:i]), strings.TrimSpace(s[i+len(sep):])
+}
diff --git a/llgo/third_party/go.tools/go/types/expr.go b/llgo/third_party/go.tools/go/types/expr.go
new file mode 100644
index 0000000000000000000000000000000000000000..0e19fb970c3099a4bf1c16c8243733f30c28125c
--- /dev/null
+++ b/llgo/third_party/go.tools/go/types/expr.go
@@ -0,0 +1,1474 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements typechecking of expressions.
+
+package types
+
+import (
+ "fmt"
+ "go/ast"
+ "go/token"
+ "math"
+
+ "llvm.org/llgo/third_party/go.tools/go/exact"
+)
+
+/*
+Basic algorithm:
+
+Expressions are checked recursively, top down. Expression checker functions
+are generally of the form:
+
+ func f(x *operand, e *ast.Expr, ...)
+
+where e is the expression to be checked, and x is the result of the check.
+The check performed by f may fail in which case x.mode == invalid, and
+related error messages will have been issued by f.
+
+If a hint argument is present, it is the composite literal element type
+of an outer composite literal; it is used to type-check composite literal
+elements that have no explicit type specification in the source
+(e.g.: []T{{...}, {...}}, the hint is the type T in this case).
+
+All expressions are checked via rawExpr, which dispatches according
+to expression kind. Upon returning, rawExpr is recording the types and
+constant values for all expressions that have an untyped type (those types
+may change on the way up in the expression tree). Usually these are constants,
+but the results of comparisons or non-constant shifts of untyped constants
+may also be untyped, but not constant.
+
+Untyped expressions may eventually become fully typed (i.e., not untyped),
+typically when the value is assigned to a variable, or is used otherwise.
+The updateExprType method is used to record this final type and update
+the recorded types: the type-checked expression tree is again traversed down,
+and the new type is propagated as needed. Untyped constant expression values
+that become fully typed must now be representable by the full type (constant
+sub-expression trees are left alone except for their roots). This mechanism
+ensures that a client sees the actual (run-time) type an untyped value would
+have. It also permits type-checking of lhs shift operands "as if the shift
+were not present": when updateExprType visits an untyped lhs shift operand
+and assigns it its final type, that type must be an integer type, and a
+constant lhs must be representable as an integer.
+
+When an expression gets its final type, either on the way out from rawExpr,
+on the way down in updateExprType, or at the end of the type checker run,
+the type (and constant value, if any) is recorded via Info.Types, if present.
+*/
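+
+// To make the mechanism above concrete (illustrative only): in
+//
+// var f float32 = 1 << 3
+//
+// the expression 1 << 3 is first checked as an untyped int constant with
+// value 8; when it is assigned to f, updateExprType records float32 as its
+// final type, while the constant sub-expressions keep their untyped types
+// (only the root of the constant sub-tree is updated).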
+
+type opPredicates map[token.Token]func(Type) bool
+
+var unaryOpPredicates = opPredicates{
+ token.ADD: isNumeric,
+ token.SUB: isNumeric,
+ token.XOR: isInteger,
+ token.NOT: isBoolean,
+}
+
+func (check *Checker) op(m opPredicates, x *operand, op token.Token) bool {
+ if pred := m[op]; pred != nil {
+ if !pred(x.typ) {
+ check.invalidOp(x.pos(), "operator %s not defined for %s", op, x)
+ return false
+ }
+ } else {
+ check.invalidAST(x.pos(), "unknown operator %s", op)
+ return false
+ }
+ return true
+}
+
+func (check *Checker) unary(x *operand, op token.Token) {
+ switch op {
+ case token.AND:
+ // spec: "As an exception to the addressability
+ // requirement x may also be a composite literal."
+ if _, ok := unparen(x.expr).(*ast.CompositeLit); !ok && x.mode != variable {
+ check.invalidOp(x.pos(), "cannot take address of %s", x)
+ x.mode = invalid
+ return
+ }
+ x.mode = value
+ x.typ = &Pointer{base: x.typ}
+ return
+
+ case token.ARROW:
+ typ, ok := x.typ.Underlying().(*Chan)
+ if !ok {
+ check.invalidOp(x.pos(), "cannot receive from non-channel %s", x)
+ x.mode = invalid
+ return
+ }
+ if typ.dir == SendOnly {
+ check.invalidOp(x.pos(), "cannot receive from send-only channel %s", x)
+ x.mode = invalid
+ return
+ }
+ x.mode = commaok
+ x.typ = typ.elem
+ check.hasCallOrRecv = true
+ return
+ }
+
+ if !check.op(unaryOpPredicates, x, op) {
+ x.mode = invalid
+ return
+ }
+
+ if x.mode == constant {
+ typ := x.typ.Underlying().(*Basic)
+ size := -1
+ if isUnsigned(typ) {
+ size = int(check.conf.sizeof(typ))
+ }
+ x.val = exact.UnaryOp(op, x.val, size)
+ // Typed constants must be representable in
+ // their type after each constant operation.
+ if isTyped(typ) {
+ check.representable(x, typ)
+ }
+ return
+ }
+
+ x.mode = value
+ // x.typ remains unchanged
+}
+
+func isShift(op token.Token) bool {
+ return op == token.SHL || op == token.SHR
+}
+
+func isComparison(op token.Token) bool {
+ // Note: tokens are not ordered well to make this much easier
+ switch op {
+ case token.EQL, token.NEQ, token.LSS, token.LEQ, token.GTR, token.GEQ:
+ return true
+ }
+ return false
+}
+
+func fitsFloat32(x exact.Value) bool {
+ f32, _ := exact.Float32Val(x)
+ f := float64(f32)
+ return !math.IsInf(f, 0)
+}
+
+func roundFloat32(x exact.Value) exact.Value {
+ f32, _ := exact.Float32Val(x)
+ f := float64(f32)
+ if !math.IsInf(f, 0) {
+ return exact.MakeFloat64(f)
+ }
+ return nil
+}
+
+func fitsFloat64(x exact.Value) bool {
+ f, _ := exact.Float64Val(x)
+ return !math.IsInf(f, 0)
+}
+
+func roundFloat64(x exact.Value) exact.Value {
+ f, _ := exact.Float64Val(x)
+ if !math.IsInf(f, 0) {
+ return exact.MakeFloat64(f)
+ }
+ return nil
+}
+
+// representableConst reports whether x can be represented as
+// value of the given basic type kind and for the configuration
+// provided (only needed for int/uint sizes).
+//
+// If rounded != nil, *rounded is set to the rounded value of x for
+// representable floating-point values; it is left alone otherwise.
+// It is ok to provide the address of the first argument for rounded.
+func representableConst(x exact.Value, conf *Config, as BasicKind, rounded *exact.Value) bool {
+ switch x.Kind() {
+ case exact.Unknown:
+ return true
+
+ case exact.Bool:
+ return as == Bool || as == UntypedBool
+
+ case exact.Int:
+ if x, ok := exact.Int64Val(x); ok {
+ switch as {
+ case Int:
+ var s = uint(conf.sizeof(Typ[as])) * 8
+ return int64(-1)<<(s-1) <= x && x <= int64(1)<<(s-1)-1
+ case Int8:
+ const s = 8
+ return -1<<(s-1) <= x && x <= 1<<(s-1)-1
+ case Int16:
+ const s = 16
+ return -1<<(s-1) <= x && x <= 1<<(s-1)-1
+ case Int32:
+ const s = 32
+ return -1<<(s-1) <= x && x <= 1<<(s-1)-1
+ case Int64:
+ return true
+ case Uint, Uintptr:
+ if s := uint(conf.sizeof(Typ[as])) * 8; s < 64 {
+ return 0 <= x && x <= int64(1)<<s-1
+ }
+ return 0 <= x
+ case Uint8:
+ const s = 8
+ return 0 <= x && x <= 1<<s-1
+ case Uint16:
+ const s = 16
+ return 0 <= x && x <= 1<<s-1
+ case Uint32:
+ const s = 32
+ return 0 <= x && x <= 1<<s-1
+ case Uint64:
+ return 0 <= x
+ case Float32, Complex64:
+ if rounded == nil {
+ return fitsFloat32(x)
+ }
+ r := roundFloat32(x)
+ if r != nil {
+ *rounded = r
+ return true
+ }
+ case Float64, Complex128:
+ if rounded == nil {
+ return fitsFloat64(x)
+ }
+ r := roundFloat64(x)
+ if r != nil {
+ *rounded = r
+ return true
+ }
+ case UntypedInt, UntypedFloat, UntypedComplex:
+ return true
+ }
+ }
+ // x does not fit into int64
+ switch n := exact.BitLen(x); as {
+ case Uint, Uintptr:
+ var s = uint(conf.sizeof(Typ[as])) * 8
+ return exact.Sign(x) >= 0 && n <= int(s)
+ case Uint64:
+ return exact.Sign(x) >= 0 && n <= 64
+ case Float32, Complex64:
+ if rounded == nil {
+ return fitsFloat32(x)
+ }
+ r := roundFloat32(x)
+ if r != nil {
+ *rounded = r
+ return true
+ }
+ case Float64, Complex128:
+ if rounded == nil {
+ return fitsFloat64(x)
+ }
+ r := roundFloat64(x)
+ if r != nil {
+ *rounded = r
+ return true
+ }
+ case UntypedInt, UntypedFloat, UntypedComplex:
+ return true
+ }
+
+ case exact.Float:
+ switch as {
+ case Float32, Complex64:
+ if rounded == nil {
+ return fitsFloat32(x)
+ }
+ r := roundFloat32(x)
+ if r != nil {
+ *rounded = r
+ return true
+ }
+ case Float64, Complex128:
+ if rounded == nil {
+ return fitsFloat64(x)
+ }
+ r := roundFloat64(x)
+ if r != nil {
+ *rounded = r
+ return true
+ }
+ case UntypedFloat, UntypedComplex:
+ return true
+ }
+
+ case exact.Complex:
+ switch as {
+ case Complex64:
+ if rounded == nil {
+ return fitsFloat32(exact.Real(x)) && fitsFloat32(exact.Imag(x))
+ }
+ re := roundFloat32(exact.Real(x))
+ im := roundFloat32(exact.Imag(x))
+ if re != nil && im != nil {
+ *rounded = exact.BinaryOp(re, token.ADD, exact.MakeImag(im))
+ return true
+ }
+ case Complex128:
+ if rounded == nil {
+ return fitsFloat64(exact.Real(x)) && fitsFloat64(exact.Imag(x))
+ }
+ re := roundFloat64(exact.Real(x))
+ im := roundFloat64(exact.Imag(x))
+ if re != nil && im != nil {
+ *rounded = exact.BinaryOp(re, token.ADD, exact.MakeImag(im))
+ return true
+ }
+ case UntypedComplex:
+ return true
+ }
+
+ case exact.String:
+ return as == String || as == UntypedString
+
+ default:
+ unreachable()
+ }
+
+ return false
+}
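+
+// For example (illustrative, for any conf *Config):
+//
+// representableConst(exact.MakeInt64(300), conf, Uint8, nil) // false: 300 > 255
+// representableConst(exact.MakeInt64(300), conf, Int16, nil) // true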
+
+// representable checks that a constant operand is representable in the given basic type.
+func (check *Checker) representable(x *operand, typ *Basic) {
+ assert(x.mode == constant)
+ if !representableConst(x.val, check.conf, typ.kind, &x.val) {
+ var msg string
+ if isNumeric(x.typ) && isNumeric(typ) {
+ // numeric conversion : error msg
+ //
+ // integer -> integer : overflows
+ // integer -> float : overflows (actually not possible)
+ // float -> integer : truncated
+ // float -> float : overflows
+ //
+ if !isInteger(x.typ) && isInteger(typ) {
+ msg = "%s truncated to %s"
+ } else {
+ msg = "%s overflows %s"
+ }
+ } else {
+ msg = "cannot convert %s to %s"
+ }
+ check.errorf(x.pos(), msg, x, typ)
+ x.mode = invalid
+ }
+}
+
+// updateExprType updates the type of x to typ and invokes itself
+// recursively for the operands of x, depending on expression kind.
+// If typ is still an untyped and not the final type, updateExprType
+// only updates the recorded untyped type for x and possibly its
+// operands. Otherwise (i.e., typ is not an untyped type anymore,
+// or it is the final type for x), the type and value are recorded.
+// Also, if x is a constant, it must be representable as a value of typ,
+// and if x is the (formerly untyped) lhs operand of a non-constant
+// shift, it must be an integer value.
+//
+func (check *Checker) updateExprType(x ast.Expr, typ Type, final bool) {
+ old, found := check.untyped[x]
+ if !found {
+ return // nothing to do
+ }
+
+ // update operands of x if necessary
+ switch x := x.(type) {
+ case *ast.BadExpr,
+ *ast.FuncLit,
+ *ast.CompositeLit,
+ *ast.IndexExpr,
+ *ast.SliceExpr,
+ *ast.TypeAssertExpr,
+ *ast.StarExpr,
+ *ast.KeyValueExpr,
+ *ast.ArrayType,
+ *ast.StructType,
+ *ast.FuncType,
+ *ast.InterfaceType,
+ *ast.MapType,
+ *ast.ChanType:
+ // These expression are never untyped - nothing to do.
+ // The respective sub-expressions got their final types
+ // upon assignment or use.
+ if debug {
+ check.dump("%s: found old type(%s): %s (new: %s)", x.Pos(), x, old.typ, typ)
+ unreachable()
+ }
+ return
+
+ case *ast.CallExpr:
+ // Resulting in an untyped constant (e.g., built-in complex).
+ // The respective calls take care of calling updateExprType
+ // for the arguments if necessary.
+
+ case *ast.Ident, *ast.BasicLit, *ast.SelectorExpr:
+ // An identifier denoting a constant, a constant literal,
+ // or a qualified identifier (imported untyped constant).
+ // No operands to take care of.
+
+ case *ast.ParenExpr:
+ check.updateExprType(x.X, typ, final)
+
+ case *ast.UnaryExpr:
+ // If x is a constant, the operands were constants.
+ // They don't need to be updated since they never
+ // get "materialized" into a typed value; and they
+ // will be processed at the end of the type check.
+ if old.val != nil {
+ break
+ }
+ check.updateExprType(x.X, typ, final)
+
+ case *ast.BinaryExpr:
+ if old.val != nil {
+ break // see comment for unary expressions
+ }
+ if isComparison(x.Op) {
+ // The result type is independent of operand types
+ // and the operand types must have final types.
+ } else if isShift(x.Op) {
+ // The result type depends only on lhs operand.
+ // The rhs type was updated when checking the shift.
+ check.updateExprType(x.X, typ, final)
+ } else {
+ // The operand types match the result type.
+ check.updateExprType(x.X, typ, final)
+ check.updateExprType(x.Y, typ, final)
+ }
+
+ default:
+ unreachable()
+ }
+
+ // If the new type is not final and still untyped, just
+ // update the recorded type.
+ if !final && isUntyped(typ) {
+ old.typ = typ.Underlying().(*Basic)
+ check.untyped[x] = old
+ return
+ }
+
+ // Otherwise we have the final (typed or untyped type).
+ // Remove it from the map of yet untyped expressions.
+ delete(check.untyped, x)
+
+ // If x is the lhs of a shift, its final type must be integer.
+ // We already know from the shift check that it is representable
+ // as an integer if it is a constant.
+ if old.isLhs && !isInteger(typ) {
+ check.invalidOp(x.Pos(), "shifted operand %s (type %s) must be integer", x, typ)
+ return
+ }
+
+ // Everything's fine, record final type and value for x.
+ check.recordTypeAndValue(x, old.mode, typ, old.val)
+}
+
+// updateExprVal updates the value of x to val.
+func (check *Checker) updateExprVal(x ast.Expr, val exact.Value) {
+ if info, ok := check.untyped[x]; ok {
+ info.val = val
+ check.untyped[x] = info
+ }
+}
+
+// convertUntyped attempts to set the type of an untyped value to the target type.
+func (check *Checker) convertUntyped(x *operand, target Type) {
+ if x.mode == invalid || isTyped(x.typ) || target == Typ[Invalid] {
+ return
+ }
+
+ // TODO(gri) Sloppy code - clean up. This function is central
+ // to assignment and expression checking.
+
+ if isUntyped(target) {
+ // both x and target are untyped
+ xkind := x.typ.(*Basic).kind
+ tkind := target.(*Basic).kind
+ if isNumeric(x.typ) && isNumeric(target) {
+ if xkind < tkind {
+ x.typ = target
+ check.updateExprType(x.expr, target, false)
+ }
+ } else if xkind != tkind {
+ goto Error
+ }
+ return
+ }
+
+ // typed target
+ switch t := target.Underlying().(type) {
+ case *Basic:
+ if x.mode == constant {
+ check.representable(x, t)
+ if x.mode == invalid {
+ return
+ }
+ // expression value may have been rounded - update if needed
+ // TODO(gri) A floating-point value may silently underflow to
+ // zero. If it was negative, the sign is lost. See issue 6898.
+ check.updateExprVal(x.expr, x.val)
+ } else {
+ // Non-constant untyped values may appear as the
+ // result of comparisons (untyped bool), intermediate
+ // (delayed-checked) rhs operands of shifts, and as
+ // the value nil.
+ switch x.typ.(*Basic).kind {
+ case UntypedBool:
+ if !isBoolean(target) {
+ goto Error
+ }
+ case UntypedInt, UntypedRune, UntypedFloat, UntypedComplex:
+ if !isNumeric(target) {
+ goto Error
+ }
+ case UntypedString:
+ // Non-constant untyped string values are not
+ // permitted by the spec and should not occur.
+ unreachable()
+ case UntypedNil:
+ // Unsafe.Pointer is a basic type that includes nil.
+ if !hasNil(target) {
+ goto Error
+ }
+ default:
+ goto Error
+ }
+ }
+ case *Interface:
+ if !x.isNil() && !t.Empty() /* empty interfaces are ok */ {
+ goto Error
+ }
+ // Update operand types to the default type rather then
+ // the target (interface) type: values must have concrete
+ // dynamic types. If the value is nil, keep it untyped
+ // (this is important for tools such as go vet which need
+ // the dynamic type for argument checking of say, print
+ // functions)
+ if x.isNil() {
+ target = Typ[UntypedNil]
+ } else {
+ // cannot assign untyped values to non-empty interfaces
+ if !t.Empty() {
+ goto Error
+ }
+ target = defaultType(x.typ)
+ }
+ case *Pointer, *Signature, *Slice, *Map, *Chan:
+ if !x.isNil() {
+ goto Error
+ }
+ // keep nil untyped - see comment for interfaces, above
+ target = Typ[UntypedNil]
+ default:
+ goto Error
+ }
+
+ x.typ = target
+ check.updateExprType(x.expr, target, true) // UntypedNils are final
+ return
+
+Error:
+ check.errorf(x.pos(), "cannot convert %s to %s", x, target)
+ x.mode = invalid
+}
+
+func (check *Checker) comparison(x, y *operand, op token.Token) {
+ // spec: "In any comparison, the first operand must be assignable
+ // to the type of the second operand, or vice versa."
+ err := ""
+ if x.assignableTo(check.conf, y.typ) || y.assignableTo(check.conf, x.typ) {
+ defined := false
+ switch op {
+ case token.EQL, token.NEQ:
+ // spec: "The equality operators == and != apply to operands that are comparable."
+ defined = Comparable(x.typ) || x.isNil() && hasNil(y.typ) || y.isNil() && hasNil(x.typ)
+ case token.LSS, token.LEQ, token.GTR, token.GEQ:
+ // spec: The ordering operators <, <=, >, and >= apply to operands that are ordered."
+ defined = isOrdered(x.typ)
+ default:
+ unreachable()
+ }
+ if !defined {
+ typ := x.typ
+ if x.isNil() {
+ typ = y.typ
+ }
+ err = check.sprintf("operator %s not defined for %s", op, typ)
+ }
+ } else {
+ err = check.sprintf("mismatched types %s and %s", x.typ, y.typ)
+ }
+
+ if err != "" {
+ check.errorf(x.pos(), "cannot compare %s %s %s (%s)", x.expr, op, y.expr, err)
+ x.mode = invalid
+ return
+ }
+
+ if x.mode == constant && y.mode == constant {
+ x.val = exact.MakeBool(exact.Compare(x.val, op, y.val))
+ // The operands are never materialized; no need to update
+ // their types.
+ } else {
+ x.mode = value
+ // The operands have now their final types, which at run-
+ // time will be materialized. Update the expression trees.
+ // If the current types are untyped, the materialized type
+ // is the respective default type.
+ check.updateExprType(x.expr, defaultType(x.typ), true)
+ check.updateExprType(y.expr, defaultType(y.typ), true)
+ }
+
+ // spec: "Comparison operators compare two operands and yield
+ // an untyped boolean value."
+ x.typ = Typ[UntypedBool]
+}
+
+func (check *Checker) shift(x, y *operand, op token.Token) {
+ untypedx := isUntyped(x.typ)
+
+ // The lhs must be of integer type or be representable
+ // as an integer; otherwise the shift has no chance.
+ if !isInteger(x.typ) && (!untypedx || !representableConst(x.val, nil, UntypedInt, nil)) {
+ check.invalidOp(x.pos(), "shifted operand %s must be integer", x)
+ x.mode = invalid
+ return
+ }
+
+ // spec: "The right operand in a shift expression must have unsigned
+ // integer type or be an untyped constant that can be converted to
+ // unsigned integer type."
+ switch {
+ case isInteger(y.typ) && isUnsigned(y.typ):
+ // nothing to do
+ case isUntyped(y.typ):
+ check.convertUntyped(y, Typ[UntypedInt])
+ if y.mode == invalid {
+ x.mode = invalid
+ return
+ }
+ default:
+ check.invalidOp(y.pos(), "shift count %s must be unsigned integer", y)
+ x.mode = invalid
+ return
+ }
+
+ if x.mode == constant {
+ if y.mode == constant {
+ // rhs must be within reasonable bounds
+ const stupidShift = 1023 - 1 + 52 // so we can express smallestFloat64
+ s, ok := exact.Uint64Val(y.val)
+ if !ok || s > stupidShift {
+ check.invalidOp(y.pos(), "stupid shift count %s", y)
+ x.mode = invalid
+ return
+ }
+ // The lhs is representable as an integer but may not be an integer
+ // (e.g., 2.0, an untyped float) - this can only happen for untyped
+ // non-integer numeric constants. Correct the type so that the shift
+ // result is of integer type.
+ if !isInteger(x.typ) {
+ x.typ = Typ[UntypedInt]
+ }
+ x.val = exact.Shift(x.val, op, uint(s))
+ return
+ }
+
+ // non-constant shift with constant lhs
+ if untypedx {
+ // spec: "If the left operand of a non-constant shift
+ // expression is an untyped constant, the type of the
+ // constant is what it would be if the shift expression
+ // were replaced by its left operand alone.".
+ //
+ // Delay operand checking until we know the final type:
+ // The lhs expression must be in the untyped map, mark
+ // the entry as lhs shift operand.
+ info, found := check.untyped[x.expr]
+ assert(found)
+ info.isLhs = true
+ check.untyped[x.expr] = info
+ // keep x's type
+ x.mode = value
+ return
+ }
+ }
+
+ // constant rhs must be >= 0
+ if y.mode == constant && exact.Sign(y.val) < 0 {
+ check.invalidOp(y.pos(), "shift count %s must not be negative", y)
+ }
+
+ // non-constant shift - lhs must be an integer
+ if !isInteger(x.typ) {
+ check.invalidOp(x.pos(), "shifted operand %s must be integer", x)
+ x.mode = invalid
+ return
+ }
+
+ x.mode = value
+}
+
+var binaryOpPredicates = opPredicates{
+ token.ADD: func(typ Type) bool { return isNumeric(typ) || isString(typ) },
+ token.SUB: isNumeric,
+ token.MUL: isNumeric,
+ token.QUO: isNumeric,
+ token.REM: isInteger,
+
+ token.AND: isInteger,
+ token.OR: isInteger,
+ token.XOR: isInteger,
+ token.AND_NOT: isInteger,
+
+ token.LAND: isBoolean,
+ token.LOR: isBoolean,
+}
+
+func (check *Checker) binary(x *operand, lhs, rhs ast.Expr, op token.Token) {
+ var y operand
+
+ check.expr(x, lhs)
+ check.expr(&y, rhs)
+
+ if x.mode == invalid {
+ return
+ }
+ if y.mode == invalid {
+ x.mode = invalid
+ x.expr = y.expr
+ return
+ }
+
+ if isShift(op) {
+ check.shift(x, &y, op)
+ return
+ }
+
+ check.convertUntyped(x, y.typ)
+ if x.mode == invalid {
+ return
+ }
+ check.convertUntyped(&y, x.typ)
+ if y.mode == invalid {
+ x.mode = invalid
+ return
+ }
+
+ if isComparison(op) {
+ check.comparison(x, &y, op)
+ return
+ }
+
+ if !Identical(x.typ, y.typ) {
+ // only report an error if we have valid types
+ // (otherwise we had an error reported elsewhere already)
+ if x.typ != Typ[Invalid] && y.typ != Typ[Invalid] {
+ check.invalidOp(x.pos(), "mismatched types %s and %s", x.typ, y.typ)
+ }
+ x.mode = invalid
+ return
+ }
+
+ if !check.op(binaryOpPredicates, x, op) {
+ x.mode = invalid
+ return
+ }
+
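+ // Division or remainder by a constant zero is rejected when the left
+ // operand is a constant or the operands have integer type.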
+ if (op == token.QUO || op == token.REM) && (x.mode == constant || isInteger(x.typ)) && y.mode == constant && exact.Sign(y.val) == 0 {
+ check.invalidOp(y.pos(), "division by zero")
+ x.mode = invalid
+ return
+ }
+
+ if x.mode == constant && y.mode == constant {
+ typ := x.typ.Underlying().(*Basic)
+ // force integer division of integer operands
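+ // (exact.BinaryOp performs truncating integer division when
+ // given QUO_ASSIGN instead of QUO)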
+ if op == token.QUO && isInteger(typ) {
+ op = token.QUO_ASSIGN
+ }
+ x.val = exact.BinaryOp(x.val, op, y.val)
+ // Typed constants must be representable in
+ // their type after each constant operation.
+ if isTyped(typ) {
+ check.representable(x, typ)
+ }
+ return
+ }
+
+ x.mode = value
+ // x.typ is unchanged
+}
+
+// index checks an index expression for validity.
+ // If max >= 0, it is the upper bound (exclusive) for index.
+// If index is valid and the result i >= 0, then i is the constant value of index.
+func (check *Checker) index(index ast.Expr, max int64) (i int64, valid bool) {
+ var x operand
+ check.expr(&x, index)
+ if x.mode == invalid {
+ return
+ }
+
+ // an untyped constant must be representable as Int
+ check.convertUntyped(&x, Typ[Int])
+ if x.mode == invalid {
+ return
+ }
+
+ // the index must be of integer type
+ if !isInteger(x.typ) {
+ check.invalidArg(x.pos(), "index %s must be integer", &x)
+ return
+ }
+
+ // a constant index i must be in bounds
+ if x.mode == constant {
+ if exact.Sign(x.val) < 0 {
+ check.invalidArg(x.pos(), "index %s must not be negative", &x)
+ return
+ }
+ i, valid = exact.Int64Val(x.val)
+ if !valid || max >= 0 && i >= max {
+ check.errorf(x.pos(), "index %s is out of bounds", &x)
+ return i, false
+ }
+ // 0 <= i [ && i < max ]
+ return i, true
+ }
+
+ return -1, true
+}
+
+ // indexedElts checks the elements (elts) of an array or slice composite literal
+// against the literal's element type (typ), and the element indices against
+// the literal length if known (length >= 0). It returns the length of the
+// literal (maximum index value + 1).
+//
+func (check *Checker) indexedElts(elts []ast.Expr, typ Type, length int64) int64 {
+ visited := make(map[int64]bool, len(elts))
+ var index, max int64
+ for _, e := range elts {
+ // determine and check index
+ validIndex := false
+ eval := e
+ if kv, _ := e.(*ast.KeyValueExpr); kv != nil {
+ if i, ok := check.index(kv.Key, length); ok {
+ if i >= 0 {
+ index = i
+ validIndex = true
+ } else {
+ check.errorf(e.Pos(), "index %s must be integer constant", kv.Key)
+ }
+ }
+ eval = kv.Value
+ } else if length >= 0 && index >= length {
+ check.errorf(e.Pos(), "index %d is out of bounds (>= %d)", index, length)
+ } else {
+ validIndex = true
+ }
+
+ // if we have a valid index, check for duplicate entries
+ if validIndex {
+ if visited[index] {
+ check.errorf(e.Pos(), "duplicate index %d in array or slice literal", index)
+ }
+ visited[index] = true
+ }
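+ // the implicit index of the next element is one past the current
+ // one, whether the current index was given explicitly or not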
+ index++
+ if index > max {
+ max = index
+ }
+
+ // check element against composite literal element type
+ var x operand
+ check.exprWithHint(&x, eval, typ)
+ if !check.assignment(&x, typ) && x.mode != invalid {
+ check.errorf(x.pos(), "cannot use %s as %s value in array or slice literal", &x, typ)
+ }
+ }
+ return max
+}
+
+// exprKind describes the kind of an expression; the kind
+// determines if an expression is valid in 'statement context'.
+type exprKind int
+
+const (
+ conversion exprKind = iota
+ expression
+ statement
+)
+
+// rawExpr typechecks expression e and initializes x with the expression
+// value or type. If an error occurred, x.mode is set to invalid.
+// If hint != nil, it is the type of a composite literal element.
+//
+func (check *Checker) rawExpr(x *operand, e ast.Expr, hint Type) exprKind {
+ if trace {
+ check.trace(e.Pos(), "%s", e)
+ check.indent++
+ defer func() {
+ check.indent--
+ check.trace(e.Pos(), "=> %s", x)
+ }()
+ }
+
+ kind := check.exprInternal(x, e, hint)
+
+ // convert x into a user-friendly set of values
+ // TODO(gri) this code can be simplified
+ var typ Type
+ var val exact.Value
+ switch x.mode {
+ case invalid:
+ typ = Typ[Invalid]
+ case novalue:
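+ // use a nil *Tuple as the "type" of a value-less expression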
+ typ = (*Tuple)(nil)
+ case constant:
+ typ = x.typ
+ val = x.val
+ default:
+ typ = x.typ
+ }
+ assert(x.expr != nil && typ != nil)
+
+ if isUntyped(typ) {
+ // delay type and value recording until we know the type
+ // or until the end of type checking
+ check.rememberUntyped(x.expr, false, x.mode, typ.(*Basic), val)
+ } else {
+ check.recordTypeAndValue(e, x.mode, typ, val)
+ }
+
+ return kind
+}
+
+// exprInternal contains the core of type checking of expressions.
+// Must only be called by rawExpr.
+//
+func (check *Checker) exprInternal(x *operand, e ast.Expr, hint Type) exprKind {
+ // make sure x has a valid state in case of bailout
+ // (was issue 5770)
+ x.mode = invalid
+ x.typ = Typ[Invalid]
+
+ switch e := e.(type) {
+ case *ast.BadExpr:
+ goto Error // error was reported before
+
+ case *ast.Ident:
+ check.ident(x, e, nil, nil)
+
+ case *ast.Ellipsis:
+ // ellipses are handled explicitly where they are legal
+ // (array composite literals and parameter lists)
+ check.error(e.Pos(), "invalid use of '...'")
+ goto Error
+
+ case *ast.BasicLit:
+ x.setConst(e.Kind, e.Value)
+ if x.mode == invalid {
+ check.invalidAST(e.Pos(), "invalid literal %v", e.Value)
+ goto Error
+ }
+
+ case *ast.FuncLit:
+ if sig, ok := check.typ(e.Type).(*Signature); ok {
+ // Anonymous functions are considered part of the
+ // init expression/func declaration which contains
+ // them: use existing package-level declaration info.
+ check.funcBody(check.decl, "", sig, e.Body)
+ x.mode = value
+ x.typ = sig
+ } else {
+ check.invalidAST(e.Pos(), "invalid function literal %s", e)
+ goto Error
+ }
+
+ case *ast.CompositeLit:
+ typ := hint
+ openArray := false
+ if e.Type != nil {
+ // [...]T array types may only appear with composite literals.
+ // Check for them here so we don't have to handle ... in general.
+ typ = nil
+ if atyp, _ := e.Type.(*ast.ArrayType); atyp != nil && atyp.Len != nil {
+ if ellip, _ := atyp.Len.(*ast.Ellipsis); ellip != nil && ellip.Elt == nil {
+ // We have an "open" [...]T array type.
+ // Create a new ArrayType with unknown length (-1)
+ // and finish setting it up after analyzing the literal.
+ typ = &Array{len: -1, elem: check.typ(atyp.Elt)}
+ openArray = true
+ }
+ }
+ if typ == nil {
+ typ = check.typ(e.Type)
+ }
+ }
+ if typ == nil {
+ check.error(e.Pos(), "missing type in composite literal")
+ goto Error
+ }
+
+ switch typ, _ := deref(typ); utyp := typ.Underlying().(type) {
+ case *Struct:
+ if len(e.Elts) == 0 {
+ break
+ }
+ fields := utyp.fields
+ if _, ok := e.Elts[0].(*ast.KeyValueExpr); ok {
+ // all elements must have keys
+ visited := make([]bool, len(fields))
+ for _, e := range e.Elts {
+ kv, _ := e.(*ast.KeyValueExpr)
+ if kv == nil {
+ check.error(e.Pos(), "mixture of field:value and value elements in struct literal")
+ continue
+ }
+ key, _ := kv.Key.(*ast.Ident)
+ if key == nil {
+ check.errorf(kv.Pos(), "invalid field name %s in struct literal", kv.Key)
+ continue
+ }
+ i := fieldIndex(utyp.fields, check.pkg, key.Name)
+ if i < 0 {
+ check.errorf(kv.Pos(), "unknown field %s in struct literal", key.Name)
+ continue
+ }
+ fld := fields[i]
+ check.recordUse(key, fld)
+ // 0 <= i < len(fields)
+ if visited[i] {
+ check.errorf(kv.Pos(), "duplicate field name %s in struct literal", key.Name)
+ continue
+ }
+ visited[i] = true
+ check.expr(x, kv.Value)
+ etyp := fld.typ
+ if !check.assignment(x, etyp) {
+ if x.mode != invalid {
+ check.errorf(x.pos(), "cannot use %s as %s value in struct literal", x, etyp)
+ }
+ continue
+ }
+ }
+ } else {
+ // no element may have a key
+ for i, e := range e.Elts {
+ if kv, _ := e.(*ast.KeyValueExpr); kv != nil {
+ check.error(kv.Pos(), "mixture of field:value and value elements in struct literal")
+ continue
+ }
+ check.expr(x, e)
+ if i >= len(fields) {
+ check.error(x.pos(), "too many values in struct literal")
+ break // cannot continue
+ }
+ // i < len(fields)
+ etyp := fields[i].typ
+ if !check.assignment(x, etyp) {
+ if x.mode != invalid {
+ check.errorf(x.pos(), "cannot use %s as %s value in struct literal", x, etyp)
+ }
+ continue
+ }
+ }
+ if len(e.Elts) < len(fields) {
+ check.error(e.Rbrace, "too few values in struct literal")
+ // ok to continue
+ }
+ }
+
+ case *Array:
+ n := check.indexedElts(e.Elts, utyp.elem, utyp.len)
+ // if we have an "open" [...]T array, set the length now that we know it
+ if openArray {
+ utyp.len = n
+ }
+
+ case *Slice:
+ check.indexedElts(e.Elts, utyp.elem, -1)
+
+ case *Map:
+ visited := make(map[interface{}][]Type, len(e.Elts))
+ for _, e := range e.Elts {
+ kv, _ := e.(*ast.KeyValueExpr)
+ if kv == nil {
+ check.error(e.Pos(), "missing key in map literal")
+ continue
+ }
+ check.expr(x, kv.Key)
+ if !check.assignment(x, utyp.key) {
+ if x.mode != invalid {
+ check.errorf(x.pos(), "cannot use %s as %s key in map literal", x, utyp.key)
+ }
+ continue
+ }
+ if x.mode == constant {
+ duplicate := false
+ // if the key is of interface type, the type is also significant when checking for duplicates
+ if _, ok := utyp.key.Underlying().(*Interface); ok {
+ for _, vtyp := range visited[x.val] {
+ if Identical(vtyp, x.typ) {
+ duplicate = true
+ break
+ }
+ }
+ visited[x.val] = append(visited[x.val], x.typ)
+ } else {
+ _, duplicate = visited[x.val]
+ visited[x.val] = nil
+ }
+ if duplicate {
+ check.errorf(x.pos(), "duplicate key %s in map literal", x.val)
+ continue
+ }
+ }
+ check.exprWithHint(x, kv.Value, utyp.elem)
+ if !check.assignment(x, utyp.elem) {
+ if x.mode != invalid {
+ check.errorf(x.pos(), "cannot use %s as %s value in map literal", x, utyp.elem)
+ }
+ continue
+ }
+ }
+
+ default:
+ check.errorf(e.Pos(), "invalid composite literal type %s", typ)
+ goto Error
+ }
+
+ x.mode = value
+ x.typ = typ
+
+ case *ast.ParenExpr:
+ kind := check.rawExpr(x, e.X, nil)
+ x.expr = e
+ return kind
+
+ case *ast.SelectorExpr:
+ check.selector(x, e)
+
+ case *ast.IndexExpr:
+ check.expr(x, e.X)
+ if x.mode == invalid {
+ goto Error
+ }
+
+ valid := false
+ length := int64(-1) // valid if >= 0
+ switch typ := x.typ.Underlying().(type) {
+ case *Basic:
+ if isString(typ) {
+ valid = true
+ if x.mode == constant {
+ length = int64(len(exact.StringVal(x.val)))
+ }
+ // an indexed string always yields a byte value
+ // (not a constant) even if the string and the
+ // index are constant
+ x.mode = value
+ x.typ = UniverseByte // use 'byte' name
+ }
+
+ case *Array:
+ valid = true
+ length = typ.len
+ if x.mode != variable {
+ x.mode = value
+ }
+ x.typ = typ.elem
+
+ case *Pointer:
+ if typ, _ := typ.base.Underlying().(*Array); typ != nil {
+ valid = true
+ length = typ.len
+ x.mode = variable
+ x.typ = typ.elem
+ }
+
+ case *Slice:
+ valid = true
+ x.mode = variable
+ x.typ = typ.elem
+
+ case *Map:
+ var key operand
+ check.expr(&key, e.Index)
+ if !check.assignment(&key, typ.key) {
+ if key.mode != invalid {
+ check.invalidOp(key.pos(), "cannot use %s as map index of type %s", &key, typ.key)
+ }
+ goto Error
+ }
+ x.mode = mapindex
+ x.typ = typ.elem
+ x.expr = e
+ return expression
+ }
+
+ if !valid {
+ check.invalidOp(x.pos(), "cannot index %s", x)
+ goto Error
+ }
+
+ if e.Index == nil {
+ check.invalidAST(e.Pos(), "missing index for %s", x)
+ goto Error
+ }
+
+ check.index(e.Index, length)
+ // ok to continue
+
+ case *ast.SliceExpr:
+ check.expr(x, e.X)
+ if x.mode == invalid {
+ goto Error
+ }
+
+ valid := false
+ length := int64(-1) // valid if >= 0
+ switch typ := x.typ.Underlying().(type) {
+ case *Basic:
+ if isString(typ) {
+ if slice3(e) {
+ check.invalidOp(x.pos(), "3-index slice of string")
+ goto Error
+ }
+ valid = true
+ if x.mode == constant {
+ length = int64(len(exact.StringVal(x.val)))
+ }
+ // spec: "For untyped string operands the result
+ // is a non-constant value of type string."
+ if typ.kind == UntypedString {
+ x.typ = Typ[String]
+ }
+ }
+
+ case *Array:
+ valid = true
+ length = typ.len
+ if x.mode != variable {
+ check.invalidOp(x.pos(), "cannot slice %s (value not addressable)", x)
+ goto Error
+ }
+ x.typ = &Slice{elem: typ.elem}
+
+ case *Pointer:
+ if typ, _ := typ.base.Underlying().(*Array); typ != nil {
+ valid = true
+ length = typ.len
+ x.typ = &Slice{elem: typ.elem}
+ }
+
+ case *Slice:
+ valid = true
+ // x.typ doesn't change
+ }
+
+ if !valid {
+ check.invalidOp(x.pos(), "cannot slice %s", x)
+ goto Error
+ }
+
+ x.mode = value
+
+ // spec: "Only the first index may be omitted; it defaults to 0."
+ if slice3(e) && (e.High == nil || sliceMax(e) == nil) {
+ check.error(e.Rbrack, "2nd and 3rd index required in 3-index slice")
+ goto Error
+ }
+
+ // check indices
+ var ind [3]int64
+ for i, expr := range []ast.Expr{e.Low, e.High, sliceMax(e)} {
+ x := int64(-1)
+ switch {
+ case expr != nil:
+ // The "capacity" is only known statically for strings, arrays,
+ // and pointers to arrays, and it is the same as the length for
+ // those types.
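+ // Since check.index treats max as an exclusive bound, slice
+ // indices may be as large as the length, hence length+1 below.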
+ max := int64(-1)
+ if length >= 0 {
+ max = length + 1
+ }
+ if t, ok := check.index(expr, max); ok && t >= 0 {
+ x = t
+ }
+ case i == 0:
+ // default is 0 for the first index
+ x = 0
+ case length >= 0:
+ // default is length (== capacity) otherwise
+ x = length
+ }
+ ind[i] = x
+ }
+
+ // constant indices must be in range
+ // (check.index already checks that existing indices >= 0)
+ L:
+ for i, x := range ind[:len(ind)-1] {
+ if x > 0 {
+ for _, y := range ind[i+1:] {
+ if y >= 0 && x > y {
+ check.errorf(e.Rbrack, "invalid slice indices: %d > %d", x, y)
+ break L // only report one error, ok to continue
+ }
+ }
+ }
+ }
+
+ case *ast.TypeAssertExpr:
+ check.expr(x, e.X)
+ if x.mode == invalid {
+ goto Error
+ }
+ xtyp, _ := x.typ.Underlying().(*Interface)
+ if xtyp == nil {
+ check.invalidOp(x.pos(), "%s is not an interface", x)
+ goto Error
+ }
+ // x.(type) expressions are handled explicitly in type switches
+ if e.Type == nil {
+ check.invalidAST(e.Pos(), "use of .(type) outside type switch")
+ goto Error
+ }
+ T := check.typ(e.Type)
+ if T == Typ[Invalid] {
+ goto Error
+ }
+ check.typeAssertion(x.pos(), x, xtyp, T)
+ x.mode = commaok
+ x.typ = T
+
+ case *ast.CallExpr:
+ return check.call(x, e)
+
+ case *ast.StarExpr:
+ check.exprOrType(x, e.X)
+ switch x.mode {
+ case invalid:
+ goto Error
+ case typexpr:
+ x.typ = &Pointer{base: x.typ}
+ default:
+ if typ, ok := x.typ.Underlying().(*Pointer); ok {
+ x.mode = variable
+ x.typ = typ.base
+ } else {
+ check.invalidOp(x.pos(), "cannot indirect %s", x)
+ goto Error
+ }
+ }
+
+ case *ast.UnaryExpr:
+ check.expr(x, e.X)
+ if x.mode == invalid {
+ goto Error
+ }
+ check.unary(x, e.Op)
+ if x.mode == invalid {
+ goto Error
+ }
+ if e.Op == token.ARROW {
+ x.expr = e
+ return statement // receive operations may appear in statement context
+ }
+
+ case *ast.BinaryExpr:
+ check.binary(x, e.X, e.Y, e.Op)
+ if x.mode == invalid {
+ goto Error
+ }
+
+ case *ast.KeyValueExpr:
+ // key:value expressions are handled in composite literals
+ check.invalidAST(e.Pos(), "no key:value expected")
+ goto Error
+
+ case *ast.ArrayType, *ast.StructType, *ast.FuncType,
+ *ast.InterfaceType, *ast.MapType, *ast.ChanType:
+ x.mode = typexpr
+ x.typ = check.typ(e)
+ // Note: rawExpr (caller of exprInternal) will call check.recordTypeAndValue
+ // even though check.typ has already called it. This is fine as both
+ // times the same expression and type are recorded. It is also not a
+ // performance issue because we only reach here for composite literal
+ // types, which are comparatively rare.
+
+ default:
+ panic(fmt.Sprintf("%s: unknown expression type %T", check.fset.Position(e.Pos()), e))
+ }
+
+ // everything went well
+ x.expr = e
+ return expression
+
+Error:
+ x.mode = invalid
+ x.expr = e
+ return statement // avoid follow-up errors
+}
+
+// typeAssertion checks that x.(T) is legal; xtyp must be the type of x.
+func (check *Checker) typeAssertion(pos token.Pos, x *operand, xtyp *Interface, T Type) {
+ method, wrongType := assertableTo(xtyp, T)
+ if method == nil {
+ return
+ }
+
+ var msg string
+ if wrongType {
+ msg = "wrong type for method"
+ } else {
+ msg = "missing method"
+ }
+ check.errorf(pos, "%s cannot have dynamic type %s (%s %s)", x, T, msg, method.name)
+}
+
+// expr typechecks expression e and initializes x with the expression value.
+// If an error occurred, x.mode is set to invalid.
+//
+func (check *Checker) expr(x *operand, e ast.Expr) {
+ check.rawExpr(x, e, nil)
+ var msg string
+ switch x.mode {
+ default:
+ return
+ case novalue:
+ msg = "used as value"
+ case builtin:
+ msg = "must be called"
+ case typexpr:
+ msg = "is not an expression"
+ }
+ check.errorf(x.pos(), "%s %s", x, msg)
+ x.mode = invalid
+}
+
+// exprWithHint typechecks expression e and initializes x with the expression value.
+// If an error occurred, x.mode is set to invalid.
+// If hint != nil, it is the type of a composite literal element.
+//
+func (check *Checker) exprWithHint(x *operand, e ast.Expr, hint Type) {
+ assert(hint != nil)
+ check.rawExpr(x, e, hint)
+ var msg string
+ switch x.mode {
+ default:
+ return
+ case novalue:
+ msg = "used as value"
+ case builtin:
+ msg = "must be called"
+ case typexpr:
+ msg = "is not an expression"
+ }
+ check.errorf(x.pos(), "%s %s", x, msg)
+ x.mode = invalid
+}
+
+// exprOrType typechecks expression or type e and initializes x with the expression value or type.
+// If an error occurred, x.mode is set to invalid.
+//
+func (check *Checker) exprOrType(x *operand, e ast.Expr) {
+ check.rawExpr(x, e, nil)
+ if x.mode == novalue {
+ check.errorf(x.pos(), "%s used as value or type", x)
+ x.mode = invalid
+ }
+}
diff --git a/llgo/third_party/go.tools/go/types/exprstring.go b/llgo/third_party/go.tools/go/types/exprstring.go
new file mode 100644
index 0000000000000000000000000000000000000000..370bdf35324ca7265430470fd40de3a321daef8b
--- /dev/null
+++ b/llgo/third_party/go.tools/go/types/exprstring.go
@@ -0,0 +1,220 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements printing of expressions.
+
+package types
+
+import (
+ "bytes"
+ "go/ast"
+)
+
+// ExprString returns the (possibly simplified) string representation for x.
+func ExprString(x ast.Expr) string {
+ var buf bytes.Buffer
+ WriteExpr(&buf, x)
+ return buf.String()
+}
+
+// WriteExpr writes the (possibly simplified) string representation for x to buf.
+func WriteExpr(buf *bytes.Buffer, x ast.Expr) {
+ // The AST preserves source-level parentheses so there is
+ // no need to introduce them here to correct for different
+ // operator precedences. (This assumes that the AST was
+ // generated by a Go parser.)
+
+ switch x := x.(type) {
+ default:
+ buf.WriteString("(bad expr)") // nil, ast.BadExpr, ast.KeyValueExpr
+
+ case *ast.Ident:
+ buf.WriteString(x.Name)
+
+ case *ast.Ellipsis:
+ buf.WriteString("...")
+ if x.Elt != nil {
+ WriteExpr(buf, x.Elt)
+ }
+
+ case *ast.BasicLit:
+ buf.WriteString(x.Value)
+
+ case *ast.FuncLit:
+ buf.WriteByte('(')
+ WriteExpr(buf, x.Type)
+ buf.WriteString(" literal)") // simplified
+
+ case *ast.CompositeLit:
+ buf.WriteByte('(')
+ WriteExpr(buf, x.Type)
+ buf.WriteString(" literal)") // simplified
+
+ case *ast.ParenExpr:
+ buf.WriteByte('(')
+ WriteExpr(buf, x.X)
+ buf.WriteByte(')')
+
+ case *ast.SelectorExpr:
+ WriteExpr(buf, x.X)
+ buf.WriteByte('.')
+ buf.WriteString(x.Sel.Name)
+
+ case *ast.IndexExpr:
+ WriteExpr(buf, x.X)
+ buf.WriteByte('[')
+ WriteExpr(buf, x.Index)
+ buf.WriteByte(']')
+
+ case *ast.SliceExpr:
+ WriteExpr(buf, x.X)
+ buf.WriteByte('[')
+ if x.Low != nil {
+ WriteExpr(buf, x.Low)
+ }
+ buf.WriteByte(':')
+ if x.High != nil {
+ WriteExpr(buf, x.High)
+ }
+ if x.Slice3 {
+ buf.WriteByte(':')
+ if x.Max != nil {
+ WriteExpr(buf, x.Max)
+ }
+ }
+ buf.WriteByte(']')
+
+ case *ast.TypeAssertExpr:
+ WriteExpr(buf, x.X)
+ buf.WriteString(".(")
+ WriteExpr(buf, x.Type)
+ buf.WriteByte(')')
+
+ case *ast.CallExpr:
+ WriteExpr(buf, x.Fun)
+ buf.WriteByte('(')
+ for i, arg := range x.Args {
+ if i > 0 {
+ buf.WriteString(", ")
+ }
+ WriteExpr(buf, arg)
+ }
+ if x.Ellipsis.IsValid() {
+ buf.WriteString("...")
+ }
+ buf.WriteByte(')')
+
+ case *ast.StarExpr:
+ buf.WriteByte('*')
+ WriteExpr(buf, x.X)
+
+ case *ast.UnaryExpr:
+ buf.WriteString(x.Op.String())
+ WriteExpr(buf, x.X)
+
+ case *ast.BinaryExpr:
+ WriteExpr(buf, x.X)
+ buf.WriteByte(' ')
+ buf.WriteString(x.Op.String())
+ buf.WriteByte(' ')
+ WriteExpr(buf, x.Y)
+
+ case *ast.ArrayType:
+ buf.WriteByte('[')
+ if x.Len != nil {
+ WriteExpr(buf, x.Len)
+ }
+ buf.WriteByte(']')
+ WriteExpr(buf, x.Elt)
+
+ case *ast.StructType:
+ buf.WriteString("struct{")
+ writeFieldList(buf, x.Fields, "; ", false)
+ buf.WriteByte('}')
+
+ case *ast.FuncType:
+ buf.WriteString("func")
+ writeSigExpr(buf, x)
+
+ case *ast.InterfaceType:
+ buf.WriteString("interface{")
+ writeFieldList(buf, x.Methods, "; ", true)
+ buf.WriteByte('}')
+
+ case *ast.MapType:
+ buf.WriteString("map[")
+ WriteExpr(buf, x.Key)
+ buf.WriteByte(']')
+ WriteExpr(buf, x.Value)
+
+ case *ast.ChanType:
+ var s string
+ switch x.Dir {
+ case ast.SEND:
+ s = "chan<- "
+ case ast.RECV:
+ s = "<-chan "
+ default:
+ s = "chan "
+ }
+ buf.WriteString(s)
+ WriteExpr(buf, x.Value)
+ }
+}
+
+func writeSigExpr(buf *bytes.Buffer, sig *ast.FuncType) {
+ buf.WriteByte('(')
+ writeFieldList(buf, sig.Params, ", ", false)
+ buf.WriteByte(')')
+
+ res := sig.Results
+ n := res.NumFields()
+ if n == 0 {
+ // no result
+ return
+ }
+
+ buf.WriteByte(' ')
+ if n == 1 && len(res.List[0].Names) == 0 {
+ // single unnamed result
+ WriteExpr(buf, res.List[0].Type)
+ return
+ }
+
+ // multiple or named result(s)
+ buf.WriteByte('(')
+ writeFieldList(buf, res, ", ", false)
+ buf.WriteByte(')')
+}
+
+func writeFieldList(buf *bytes.Buffer, fields *ast.FieldList, sep string, iface bool) {
+ for i, f := range fields.List {
+ if i > 0 {
+ buf.WriteString(sep)
+ }
+
+ // field list names
+ for i, name := range f.Names {
+ if i > 0 {
+ buf.WriteString(", ")
+ }
+ buf.WriteString(name.Name)
+ }
+
+ // types of interface methods consist of signatures only
+ if sig, _ := f.Type.(*ast.FuncType); sig != nil && iface {
+ writeSigExpr(buf, sig)
+ continue
+ }
+
+ // named fields are separated with a blank from the field type
+ if len(f.Names) > 0 {
+ buf.WriteByte(' ')
+ }
+
+ WriteExpr(buf, f.Type)
+
+ // ignore tag
+ }
+}
diff --git a/llgo/third_party/go.tools/go/types/exprstring_test.go b/llgo/third_party/go.tools/go/types/exprstring_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..48a6b4c8b33af612f09b46cc13a76823fdf97650
--- /dev/null
+++ b/llgo/third_party/go.tools/go/types/exprstring_test.go
@@ -0,0 +1,94 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types_test
+
+import (
+ "go/parser"
+ "testing"
+
+ . "llvm.org/llgo/third_party/go.tools/go/types"
+)
+
+var testExprs = []testEntry{
+ // basic type literals
+ dup("x"),
+ dup("true"),
+ dup("42"),
+ dup("3.1415"),
+ dup("2.71828i"),
+ dup(`'a'`),
+ dup(`"foo"`),
+ dup("`bar`"),
+
+ // func and composite literals
+ {"func(){}", "(func() literal)"},
+ {"func(x int) complex128 {}", "(func(x int) complex128 literal)"},
+ {"[]int{1, 2, 3}", "([]int literal)"},
+
+ // non-type expressions
+ dup("(x)"),
+ dup("x.f"),
+ dup("a[i]"),
+
+ dup("s[:]"),
+ dup("s[i:]"),
+ dup("s[:j]"),
+ dup("s[i:j]"),
+ dup("s[:j:k]"),
+ dup("s[i:j:k]"),
+
+ dup("x.(T)"),
+
+ dup("x.([10]int)"),
+ dup("x.([...]int)"),
+
+ dup("x.(struct{})"),
+ dup("x.(struct{x int; y, z float32; E})"),
+
+ dup("x.(func())"),
+ dup("x.(func(x int))"),
+ dup("x.(func() int)"),
+ dup("x.(func(x, y int, z float32) (r int))"),
+ dup("x.(func(a, b, c int))"),
+ dup("x.(func(x ...T))"),
+
+ dup("x.(interface{})"),
+ dup("x.(interface{m(); n(x int); E})"),
+ dup("x.(interface{m(); n(x int) T; E; F})"),
+
+ dup("x.(map[K]V)"),
+
+ dup("x.(chan E)"),
+ dup("x.(<-chan E)"),
+ dup("x.(chan<- chan int)"),
+ dup("x.(chan<- <-chan int)"),
+ dup("x.(<-chan chan int)"),
+ dup("x.(chan (<-chan int))"),
+
+ dup("f()"),
+ dup("f(x)"),
+ dup("int(x)"),
+ dup("f(x, x + y)"),
+ dup("f(s...)"),
+ dup("f(a, s...)"),
+
+ dup("*x"),
+ dup("&x"),
+ dup("x + y"),
+ dup("x + y << (2 * s)"),
+}
+
+func TestExprString(t *testing.T) {
+ for _, test := range testExprs {
+ x, err := parser.ParseExpr(test.src)
+ if err != nil {
+ t.Errorf("%s: %s", test.src, err)
+ continue
+ }
+ if got := ExprString(x); got != test.str {
+ t.Errorf("%s: got %s, want %s", test.src, got, test.str)
+ }
+ }
+}
diff --git a/llgo/third_party/go.tools/go/types/go11.go b/llgo/third_party/go.tools/go/types/go11.go
new file mode 100644
index 0000000000000000000000000000000000000000..cf41cabeeac727ed3c35f745a2fdad6ae90c6949
--- /dev/null
+++ b/llgo/third_party/go.tools/go/types/go11.go
@@ -0,0 +1,17 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.2
+
+package types
+
+import "go/ast"
+
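+// Before Go 1.2, ast.SliceExpr has no Slice3 or Max field, so 3-index
+// slice expressions cannot occur; these accessors reflect that.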
+func slice3(x *ast.SliceExpr) bool {
+ return false
+}
+
+func sliceMax(x *ast.SliceExpr) ast.Expr {
+ return nil
+}
diff --git a/llgo/third_party/go.tools/go/types/go12.go b/llgo/third_party/go.tools/go/types/go12.go
new file mode 100644
index 0000000000000000000000000000000000000000..20174421540e1f15abcb0421493daafec7333c4e
--- /dev/null
+++ b/llgo/third_party/go.tools/go/types/go12.go
@@ -0,0 +1,17 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.2
+
+package types
+
+import "go/ast"
+
+func slice3(x *ast.SliceExpr) bool {
+ return x.Slice3
+}
+
+func sliceMax(x *ast.SliceExpr) ast.Expr {
+ return x.Max
+}
diff --git a/llgo/third_party/go.tools/go/types/hilbert_test.go b/llgo/third_party/go.tools/go/types/hilbert_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..521223a82282c275c9c682e2c3be43d6ade585e9
--- /dev/null
+++ b/llgo/third_party/go.tools/go/types/hilbert_test.go
@@ -0,0 +1,232 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types_test
+
+import (
+ "bytes"
+ "flag"
+ "fmt"
+ "go/ast"
+ "go/parser"
+ "go/token"
+ "io/ioutil"
+ "testing"
+
+ . "llvm.org/llgo/third_party/go.tools/go/types"
+)
+
+var (
+ H = flag.Int("H", 5, "Hilbert matrix size")
+ out = flag.String("out", "", "write generated program to out")
+)
+
+func TestHilbert(t *testing.T) {
+ // generate source
+ src := program(*H, *out)
+ if *out != "" {
+ ioutil.WriteFile(*out, src, 0666)
+ return
+ }
+
+ // parse source
+ fset := token.NewFileSet()
+ f, err := parser.ParseFile(fset, "hilbert.go", src, 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // type-check file
+ DefPredeclaredTestFuncs() // define assert built-in
+ _, err = Check(f.Name.Name, fset, []*ast.File{f})
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func program(n int, out string) []byte {
+ var g gen
+
+ g.p(`// WARNING: GENERATED FILE - DO NOT MODIFY MANUALLY!
+// (To generate, in go/types directory: go test -run=Hilbert -H=%d -out=%q)
+
+// This program tests arbitrary precision constant arithmetic
+// by generating the constant elements of a Hilbert matrix H,
+// its inverse I, and the product P = H*I. The product should
+// be the identity matrix.
+package main
+
+func main() {
+ if !ok {
+ printProduct()
+ return
+ }
+ println("PASS")
+}
+
+`, n, out)
+ g.hilbert(n)
+ g.inverse(n)
+ g.product(n)
+ g.verify(n)
+ g.printProduct(n)
+ g.binomials(2*n - 1)
+ g.factorials(2*n - 1)
+
+ return g.Bytes()
+}
+
+type gen struct {
+ bytes.Buffer
+}
+
+func (g *gen) p(format string, args ...interface{}) {
+ fmt.Fprintf(&g.Buffer, format, args...)
+}
+
+func (g *gen) hilbert(n int) {
+ g.p(`// Hilbert matrix, n = %d
+const (
+`, n)
+ for i := 0; i < n; i++ {
+ g.p("\t")
+ for j := 0; j < n; j++ {
+ if j > 0 {
+ g.p(", ")
+ }
+ g.p("h%d_%d", i, j)
+ }
+ if i == 0 {
+ g.p(" = ")
+ for j := 0; j < n; j++ {
+ if j > 0 {
+ g.p(", ")
+ }
+ g.p("1.0/(iota + %d)", j+1)
+ }
+ }
+ g.p("\n")
+ }
+ g.p(")\n\n")
+}
+
+func (g *gen) inverse(n int) {
+ g.p(`// Inverse Hilbert matrix
+const (
+`)
+ for i := 0; i < n; i++ {
+ for j := 0; j < n; j++ {
+ s := "+"
+ if (i+j)&1 != 0 {
+ s = "-"
+ }
+ g.p("\ti%d_%d = %s%d * b%d_%d * b%d_%d * b%d_%d * b%d_%d\n",
+ i, j, s, i+j+1, n+i, n-j-1, n+j, n-i-1, i+j, i, i+j, i)
+ }
+ g.p("\n")
+ }
+ g.p(")\n\n")
+}
+
+func (g *gen) product(n int) {
+ g.p(`// Product matrix
+const (
+`)
+ for i := 0; i < n; i++ {
+ for j := 0; j < n; j++ {
+ g.p("\tp%d_%d = ", i, j)
+ for k := 0; k < n; k++ {
+ if k > 0 {
+ g.p(" + ")
+ }
+ g.p("h%d_%d*i%d_%d", i, k, k, j)
+ }
+ g.p("\n")
+ }
+ g.p("\n")
+ }
+ g.p(")\n\n")
+}
+
+func (g *gen) verify(n int) {
+ g.p(`// Verify that product is the identity matrix
+const ok =
+`)
+ for i := 0; i < n; i++ {
+ for j := 0; j < n; j++ {
+ if j == 0 {
+ g.p("\t")
+ } else {
+ g.p(" && ")
+ }
+ v := 0
+ if i == j {
+ v = 1
+ }
+ g.p("p%d_%d == %d", i, j, v)
+ }
+ g.p(" &&\n")
+ }
+ g.p("\ttrue\n\n")
+
+ // verify ok at type-check time
+ if *out == "" {
+ g.p("const _ = assert(ok)\n\n")
+ }
+}
+
+func (g *gen) printProduct(n int) {
+ g.p("func printProduct() {\n")
+ for i := 0; i < n; i++ {
+ g.p("\tprintln(")
+ for j := 0; j < n; j++ {
+ if j > 0 {
+ g.p(", ")
+ }
+ g.p("p%d_%d", i, j)
+ }
+ g.p(")\n")
+ }
+ g.p("}\n\n")
+}
+
+func (g *gen) mulRange(a, b int) {
+ if a > b {
+ g.p("1")
+ return
+ }
+ for i := a; i <= b; i++ {
+ if i > a {
+ g.p("*")
+ }
+ g.p("%d", i)
+ }
+}
+
+func (g *gen) binomials(n int) {
+ g.p(`// Binomials
+const (
+`)
+ for j := 0; j <= n; j++ {
+ if j > 0 {
+ g.p("\n")
+ }
+ for k := 0; k <= j; k++ {
+ g.p("\tb%d_%d = f%d / (f%d*f%d)\n", j, k, j, k, j-k)
+ }
+ }
+ g.p(")\n\n")
+}
+
+func (g *gen) factorials(n int) {
+ g.p(`// Factorials
+const (
+ f0 = 1
+ f1 = 1
+`)
+ for i := 2; i <= n; i++ {
+ g.p("\tf%d = f%d * %d\n", i, i-1, i)
+ }
+ g.p(")\n\n")
+}
diff --git a/llgo/third_party/go.tools/go/types/initorder.go b/llgo/third_party/go.tools/go/types/initorder.go
new file mode 100644
index 0000000000000000000000000000000000000000..0fd567b26910e8fb64b4e070538dcfe347938162
--- /dev/null
+++ b/llgo/third_party/go.tools/go/types/initorder.go
@@ -0,0 +1,222 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+import (
+ "container/heap"
+ "fmt"
+)
+
+// initOrder computes the Info.InitOrder for package variables.
+func (check *Checker) initOrder() {
+ // An InitOrder may already have been computed if a package is
+ // built from several calls to (*Checker).Files. Clear it.
+ check.Info.InitOrder = check.Info.InitOrder[:0]
+
+ // compute the object dependency graph and
+ // initialize a priority queue with the list
+ // of graph nodes
+ pq := nodeQueue(dependencyGraph(check.objMap))
+ heap.Init(&pq)
+
+ const debug = false
+ if debug {
+ fmt.Printf("package %s: object dependency graph\n", check.pkg.Name())
+ for _, n := range pq {
+ for _, o := range n.out {
+ fmt.Printf("\t%s -> %s\n", n.obj.Name(), o.obj.Name())
+ }
+ }
+ fmt.Println()
+ fmt.Printf("package %s: initialization order\n", check.pkg.Name())
+ }
+
+ // determine initialization order by removing the highest priority node
+ // (the one with the fewest dependencies) and its edges from the graph,
+ // repeatedly, until there are no nodes left.
+ // In a valid Go program, those nodes always have zero dependencies (after
+ // removing all incoming dependencies), otherwise there are initialization
+ // cycles.
+ mark := 0
+ emitted := make(map[*declInfo]bool)
+ for len(pq) > 0 {
+ // get the next node
+ n := heap.Pop(&pq).(*objNode)
+
+ // if n still depends on other nodes, we have a cycle
+ if n.in > 0 {
+ mark++ // mark nodes using a different value each time
+ cycle := findPath(n, n, mark)
+ if i := valIndex(cycle); i >= 0 {
+ check.reportCycle(cycle, i)
+ }
+ // ok to continue, but the variable initialization order
+ // will be incorrect at this point since it assumes no
+ // cycle errors
+ }
+
+ // reduce dependency count of all dependent nodes
+ // and update priority queue
+ for _, out := range n.out {
+ out.in--
+ heap.Fix(&pq, out.index)
+ }
+
+ // record the init order for variables with initializers only
+ v, _ := n.obj.(*Var)
+ info := check.objMap[v]
+ if v == nil || !info.hasInitializer() {
+ continue
+ }
+
+ // n:1 variable declarations such as: a, b = f()
+ // introduce a node for each lhs variable (here: a, b);
+ // but they all have the same initializer - emit only
+ // one, for the first variable seen
+ if emitted[info] {
+ continue // initializer already emitted, if any
+ }
+ emitted[info] = true
+
+ infoLhs := info.lhs // possibly nil (see declInfo.lhs field comment)
+ if infoLhs == nil {
+ infoLhs = []*Var{v}
+ }
+ init := &Initializer{infoLhs, info.init}
+ check.Info.InitOrder = append(check.Info.InitOrder, init)
+
+ if debug {
+ fmt.Printf("\t%s\n", init)
+ }
+ }
+
+ if debug {
+ fmt.Println()
+ }
+}
+
+// findPath returns the (reversed) list of nodes z, ... c, b, a,
+// such that there is a path (list of edges) from a to z.
+// If there is no such path, the result is nil.
+// Nodes marked with the value mark are considered "visited";
+// unvisited nodes are marked during the graph search.
+func findPath(a, z *objNode, mark int) []*objNode {
+ if a.mark == mark {
+ return nil // node already seen
+ }
+ a.mark = mark
+
+ for _, n := range a.out {
+ if n == z {
+ return []*objNode{z}
+ }
+ if P := findPath(n, z, mark); P != nil {
+ return append(P, n)
+ }
+ }
+
+ return nil
+}
+
+// valIndex returns the index of the first constant or variable in a,
+// if any; or a value < 0.
+func valIndex(a []*objNode) int {
+ for i, n := range a {
+ switch n.obj.(type) {
+ case *Const, *Var:
+ return i
+ }
+ }
+ return -1
+}
+
+// reportCycle reports an error for the cycle starting at i.
+func (check *Checker) reportCycle(cycle []*objNode, i int) {
+ obj := cycle[i].obj
+ check.errorf(obj.Pos(), "initialization cycle for %s", obj.Name())
+ // print cycle
+ for _ = range cycle {
+ check.errorf(obj.Pos(), "\t%s refers to", obj.Name()) // secondary error, \t indented
+ i++
+ if i >= len(cycle) {
+ i = 0
+ }
+ obj = cycle[i].obj
+ }
+ check.errorf(obj.Pos(), "\t%s", obj.Name())
+}
+
+// An objNode represents a node in the object dependency graph.
+// Each node b in a.out represents an edge a->b indicating that
+// b depends on a.
+// Nodes may be marked for cycle detection. A node n is marked
+// if n.mark corresponds to the current mark value.
+type objNode struct {
+ obj Object // object represented by this node
+ in int // number of nodes this node depends on
+ out []*objNode // list of nodes that depend on this node
+ index int // node index in list of nodes
+ mark int // for cycle detection
+}
+
+// dependencyGraph computes the transposed object dependency graph
+// from the given objMap. The transposed graph is returned as a list
+// of nodes; an edge d->n indicates that node n depends on node d.
+func dependencyGraph(objMap map[Object]*declInfo) []*objNode {
+ // M maps each object to its corresponding node
+ M := make(map[Object]*objNode, len(objMap))
+ for obj := range objMap {
+ M[obj] = &objNode{obj: obj}
+ }
+
+ // G is the graph of nodes n
+ G := make([]*objNode, len(M))
+ i := 0
+ for obj, n := range M {
+ deps := objMap[obj].deps
+ n.in = len(deps)
+ for d := range deps {
+ d := M[d] // node n depends on node d
+ d.out = append(d.out, n) // add edge d->n
+ }
+
+ G[i] = n
+ n.index = i
+ i++
+ }
+
+ return G
+}
+
+// nodeQueue implements the container/heap interface;
+// a nodeQueue may be used as a priority queue.
+type nodeQueue []*objNode
+
+func (a nodeQueue) Len() int { return len(a) }
+
+func (a nodeQueue) Swap(i, j int) {
+ x, y := a[i], a[j]
+ a[i], a[j] = y, x
+ x.index, y.index = j, i
+}
+
+func (a nodeQueue) Less(i, j int) bool {
+ x, y := a[i], a[j]
+ // nodes are prioritized by number of incoming dependencies (1st key)
+ // and source order (2nd key)
+ return x.in < y.in || x.in == y.in && x.obj.order() < y.obj.order()
+}
+
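+// Push is required by heap.Interface but is never called: the queue
+// is fully populated before heap.Init and only shrinks afterwards.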
+func (a *nodeQueue) Push(x interface{}) {
+ panic("unreachable")
+}
+
+func (a *nodeQueue) Pop() interface{} {
+ n := len(*a)
+ x := (*a)[n-1]
+ x.index = -1 // for safety
+ *a = (*a)[:n-1]
+ return x
+}
diff --git a/llgo/third_party/go.tools/go/types/issues_test.go b/llgo/third_party/go.tools/go/types/issues_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..02b81fe6dec05428f85ac02f6c04e133d00df128
--- /dev/null
+++ b/llgo/third_party/go.tools/go/types/issues_test.go
@@ -0,0 +1,205 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements tests for various issues.
+
+package types_test
+
+import (
+ "fmt"
+ "go/ast"
+ "go/parser"
+ "sort"
+ "strings"
+ "testing"
+
+ _ "llvm.org/llgo/third_party/go.tools/go/gcimporter"
+ . "llvm.org/llgo/third_party/go.tools/go/types"
+)
+
+func TestIssue5770(t *testing.T) {
+ src := `package p; type S struct{T}`
+ f, err := parser.ParseFile(fset, "", src, 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = Check(f.Name.Name, fset, []*ast.File{f}) // do not crash
+ want := "undeclared name: T"
+ if err == nil || !strings.Contains(err.Error(), want) {
+ t.Errorf("got: %v; want: %s", err, want)
+ }
+}
+
+func TestIssue5849(t *testing.T) {
+ src := `
+package p
+var (
+ s uint
+ _ = uint8(8)
+ _ = uint16(16) << s
+ _ = uint32(32 << s)
+ _ = uint64(64 << s + s)
+ _ = (interface{})("foo")
+ _ = (interface{})(nil)
+)`
+ f, err := parser.ParseFile(fset, "", src, 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ var conf Config
+ types := make(map[ast.Expr]TypeAndValue)
+ _, err = conf.Check(f.Name.Name, fset, []*ast.File{f}, &Info{Types: types})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ for x, tv := range types {
+ var want Type
+ switch x := x.(type) {
+ case *ast.BasicLit:
+ switch x.Value {
+ case `8`:
+ want = Typ[Uint8]
+ case `16`:
+ want = Typ[Uint16]
+ case `32`:
+ want = Typ[Uint32]
+ case `64`:
+ want = Typ[Uint] // because of "+ s", s is of type uint
+ case `"foo"`:
+ want = Typ[String]
+ }
+ case *ast.Ident:
+ if x.Name == "nil" {
+ want = Typ[UntypedNil]
+ }
+ }
+ if want != nil && !Identical(tv.Type, want) {
+ t.Errorf("got %s; want %s", tv.Type, want)
+ }
+ }
+}
+
+func TestIssue6413(t *testing.T) {
+ src := `
+package p
+func f() int {
+ defer f()
+ go f()
+ return 0
+}
+`
+ f, err := parser.ParseFile(fset, "", src, 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ var conf Config
+ types := make(map[ast.Expr]TypeAndValue)
+ _, err = conf.Check(f.Name.Name, fset, []*ast.File{f}, &Info{Types: types})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ want := Typ[Int]
+ n := 0
+ for x, tv := range types {
+ if _, ok := x.(*ast.CallExpr); ok {
+ if tv.Type != want {
+ t.Errorf("%s: got %s; want %s", fset.Position(x.Pos()), tv.Type, want)
+ }
+ n++
+ }
+ }
+
+ if n != 2 {
+ t.Errorf("got %d CallExprs; want 2", n)
+ }
+}
+
+func TestIssue7245(t *testing.T) {
+ src := `
+package p
+func (T) m() (res bool) { return }
+type T struct{} // receiver type after method declaration
+`
+ f, err := parser.ParseFile(fset, "", src, 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ var conf Config
+ defs := make(map[*ast.Ident]Object)
+ _, err = conf.Check(f.Name.Name, fset, []*ast.File{f}, &Info{Defs: defs})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ m := f.Decls[0].(*ast.FuncDecl)
+ res1 := defs[m.Name].(*Func).Type().(*Signature).Results().At(0)
+ res2 := defs[m.Type.Results.List[0].Names[0]].(*Var)
+
+ if res1 != res2 {
+ t.Errorf("got %s (%p) != %s (%p)", res1, res2, res1, res2)
+ }
+}
+
+// This tests that uses of existing vars on the LHS of an assignment
+// are Uses, not Defs; and also that the (illegal) use of a non-var on
+// the LHS of an assignment is a Use nonetheless.
+func TestIssue7827(t *testing.T) {
+ const src = `
+package p
+func _() {
+ const w = 1 // defs w
+ x, y := 2, 3 // defs x, y
+ w, x, z := 4, 5, 6 // uses w, x, defs z; error: cannot assign to w
+ _, _, _ = x, y, z // uses x, y, z
+}
+`
+ const want = `L3 defs func p._()
+L4 defs const w untyped int
+L5 defs var x int
+L5 defs var y int
+L6 defs var z int
+L6 uses const w untyped int
+L6 uses var x int
+L7 uses var x int
+L7 uses var y int
+L7 uses var z int`
+
+ f, err := parser.ParseFile(fset, "", src, 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // don't abort at the first error
+ conf := Config{Error: func(err error) { t.Log(err) }}
+ defs := make(map[*ast.Ident]Object)
+ uses := make(map[*ast.Ident]Object)
+ _, err = conf.Check(f.Name.Name, fset, []*ast.File{f}, &Info{Defs: defs, Uses: uses})
+ if s := fmt.Sprint(err); !strings.HasSuffix(s, "cannot assign to w") {
+ t.Errorf("Check: unexpected error: %s", s)
+ }
+
+ var facts []string
+ for id, obj := range defs {
+ if obj != nil {
+ fact := fmt.Sprintf("L%d defs %s", fset.Position(id.Pos()).Line, obj)
+ facts = append(facts, fact)
+ }
+ }
+ for id, obj := range uses {
+ fact := fmt.Sprintf("L%d uses %s", fset.Position(id.Pos()).Line, obj)
+ facts = append(facts, fact)
+ }
+ sort.Strings(facts)
+
+ got := strings.Join(facts, "\n")
+ if got != want {
+ t.Errorf("Unexpected defs/uses\ngot:\n%s\nwant:\n%s", got, want)
+ }
+}
diff --git a/llgo/third_party/go.tools/go/types/labels.go b/llgo/third_party/go.tools/go/types/labels.go
new file mode 100644
index 0000000000000000000000000000000000000000..d6ffc52368188f566657fa4ed9276252fd56c022
--- /dev/null
+++ b/llgo/third_party/go.tools/go/types/labels.go
@@ -0,0 +1,268 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+import (
+ "go/ast"
+ "go/token"
+)
+
+// labels checks correct label use in body.
+func (check *Checker) labels(body *ast.BlockStmt) {
+ // set of all labels in this body
+ all := NewScope(nil, "label")
+
+ fwdJumps := check.blockBranches(all, nil, nil, body.List)
+
+ // If there are any forward jumps left, no label was found for
+ // the corresponding goto statements. Either those labels were
+ // never defined, or they are inside blocks and not reachable
+ // for the respective gotos.
+ for _, jmp := range fwdJumps {
+ var msg string
+ name := jmp.Label.Name
+ if alt := all.Lookup(name); alt != nil {
+ msg = "goto %s jumps into block"
+ alt.(*Label).used = true // avoid another error
+ } else {
+ msg = "label %s not declared"
+ }
+ check.errorf(jmp.Label.Pos(), msg, name)
+ }
+
+ // spec: "It is illegal to define a label that is never used."
+ for _, obj := range all.elems {
+ if lbl := obj.(*Label); !lbl.used {
+ check.softErrorf(lbl.pos, "label %s declared but not used", lbl.name)
+ }
+ }
+}
+
+// A block tracks label declarations in a block and its enclosing blocks.
+type block struct {
+ parent *block // enclosing block
+ lstmt *ast.LabeledStmt // labeled statement to which this block belongs, or nil
+ labels map[string]*ast.LabeledStmt // allocated lazily
+}
+
+// insert records a new label declaration for the current block.
+// The label must not have been declared before in any block.
+func (b *block) insert(s *ast.LabeledStmt) {
+ name := s.Label.Name
+ if debug {
+ assert(b.gotoTarget(name) == nil)
+ }
+ labels := b.labels
+ if labels == nil {
+ labels = make(map[string]*ast.LabeledStmt)
+ b.labels = labels
+ }
+ labels[name] = s
+}
+
+// gotoTarget returns the labeled statement in the current
+// or an enclosing block with the given label name, or nil.
+func (b *block) gotoTarget(name string) *ast.LabeledStmt {
+ for s := b; s != nil; s = s.parent {
+ if t := s.labels[name]; t != nil {
+ return t
+ }
+ }
+ return nil
+}
+
+// enclosingTarget returns the innermost enclosing labeled
+// statement with the given label name, or nil.
+func (b *block) enclosingTarget(name string) *ast.LabeledStmt {
+ for s := b; s != nil; s = s.parent {
+ if t := s.lstmt; t != nil && t.Label.Name == name {
+ return t
+ }
+ }
+ return nil
+}
+
+// blockBranches processes a block's statement list and returns the set of outgoing forward jumps.
+// all is the scope of all declared labels, parent the set of labels declared in the immediately
+// enclosing block, and lstmt is the labeled statement this block is associated with (or nil).
+func (check *Checker) blockBranches(all *Scope, parent *block, lstmt *ast.LabeledStmt, list []ast.Stmt) []*ast.BranchStmt {
+ b := &block{parent: parent, lstmt: lstmt}
+
+ var (
+ varDeclPos token.Pos
+ fwdJumps, badJumps []*ast.BranchStmt
+ )
+
+ // All forward jumps jumping over a variable declaration are possibly
+ // invalid (they may still jump out of the block and be ok).
+ // recordVarDecl records them for the given position.
+ recordVarDecl := func(pos token.Pos) {
+ varDeclPos = pos
+ badJumps = append(badJumps[:0], fwdJumps...) // copy fwdJumps to badJumps
+ }
+
+ jumpsOverVarDecl := func(jmp *ast.BranchStmt) bool {
+ if varDeclPos.IsValid() {
+ for _, bad := range badJumps {
+ if jmp == bad {
+ return true
+ }
+ }
+ }
+ return false
+ }
+
+ blockBranches := func(lstmt *ast.LabeledStmt, list []ast.Stmt) {
+ // Unresolved forward jumps inside the nested block
+ // become forward jumps in the current block.
+ fwdJumps = append(fwdJumps, check.blockBranches(all, b, lstmt, list)...)
+ }
+
+ var stmtBranches func(ast.Stmt)
+ stmtBranches = func(s ast.Stmt) {
+ switch s := s.(type) {
+ case *ast.DeclStmt:
+ if d, _ := s.Decl.(*ast.GenDecl); d != nil && d.Tok == token.VAR {
+ recordVarDecl(d.Pos())
+ }
+
+ case *ast.LabeledStmt:
+ // declare non-blank label
+ if name := s.Label.Name; name != "_" {
+ lbl := NewLabel(s.Label.Pos(), check.pkg, name)
+ if alt := all.Insert(lbl); alt != nil {
+ check.softErrorf(lbl.pos, "label %s already declared", name)
+ check.reportAltDecl(alt)
+ // ok to continue
+ } else {
+ b.insert(s)
+ check.recordDef(s.Label, lbl)
+ }
+ // resolve matching forward jumps and remove them from fwdJumps
+ i := 0
+ for _, jmp := range fwdJumps {
+ if jmp.Label.Name == name {
+ // match
+ lbl.used = true
+ check.recordUse(jmp.Label, lbl)
+ if jumpsOverVarDecl(jmp) {
+ check.softErrorf(
+ jmp.Label.Pos(),
+ "goto %s jumps over variable declaration at line %d",
+ name,
+ check.fset.Position(varDeclPos).Line,
+ )
+ // ok to continue
+ }
+ } else {
+ // no match - record new forward jump
+ fwdJumps[i] = jmp
+ i++
+ }
+ }
+ fwdJumps = fwdJumps[:i]
+ lstmt = s
+ }
+ stmtBranches(s.Stmt)
+
+ case *ast.BranchStmt:
+ if s.Label == nil {
+ return // checked in 1st pass (check.stmt)
+ }
+
+ // determine and validate target
+ name := s.Label.Name
+ switch s.Tok {
+ case token.BREAK:
+ // spec: "If there is a label, it must be that of an enclosing
+ // "for", "switch", or "select" statement, and that is the one
+ // whose execution terminates."
+ valid := false
+ if t := b.enclosingTarget(name); t != nil {
+ switch t.Stmt.(type) {
+ case *ast.SwitchStmt, *ast.TypeSwitchStmt, *ast.SelectStmt, *ast.ForStmt, *ast.RangeStmt:
+ valid = true
+ }
+ }
+ if !valid {
+ check.errorf(s.Label.Pos(), "invalid break label %s", name)
+ return
+ }
+
+ case token.CONTINUE:
+ // spec: "If there is a label, it must be that of an enclosing
+ // "for" statement, and that is the one whose execution advances."
+ valid := false
+ if t := b.enclosingTarget(name); t != nil {
+ switch t.Stmt.(type) {
+ case *ast.ForStmt, *ast.RangeStmt:
+ valid = true
+ }
+ }
+ if !valid {
+ check.errorf(s.Label.Pos(), "invalid continue label %s", name)
+ return
+ }
+
+ case token.GOTO:
+ if b.gotoTarget(name) == nil {
+ // label may be declared later - add branch to forward jumps
+ fwdJumps = append(fwdJumps, s)
+ return
+ }
+
+ default:
+ check.invalidAST(s.Pos(), "branch statement: %s %s", s.Tok, name)
+ return
+ }
+
+ // record label use
+ obj := all.Lookup(name)
+ obj.(*Label).used = true
+ check.recordUse(s.Label, obj)
+
+ case *ast.AssignStmt:
+ if s.Tok == token.DEFINE {
+ recordVarDecl(s.Pos())
+ }
+
+ case *ast.BlockStmt:
+ blockBranches(lstmt, s.List)
+
+ case *ast.IfStmt:
+ stmtBranches(s.Body)
+ if s.Else != nil {
+ stmtBranches(s.Else)
+ }
+
+ case *ast.CaseClause:
+ blockBranches(nil, s.Body)
+
+ case *ast.SwitchStmt:
+ stmtBranches(s.Body)
+
+ case *ast.TypeSwitchStmt:
+ stmtBranches(s.Body)
+
+ case *ast.CommClause:
+ blockBranches(nil, s.Body)
+
+ case *ast.SelectStmt:
+ stmtBranches(s.Body)
+
+ case *ast.ForStmt:
+ stmtBranches(s.Body)
+
+ case *ast.RangeStmt:
+ stmtBranches(s.Body)
+ }
+ }
+
+ for _, s := range list {
+ stmtBranches(s)
+ }
+
+ return fwdJumps
+}
diff --git a/llgo/third_party/go.tools/go/types/lookup.go b/llgo/third_party/go.tools/go/types/lookup.go
new file mode 100644
index 0000000000000000000000000000000000000000..3caca5519b63c3b5d8d8d9646471be64cbe94c34
--- /dev/null
+++ b/llgo/third_party/go.tools/go/types/lookup.go
@@ -0,0 +1,341 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements various field and method lookup functions.
+
+package types
+
+// LookupFieldOrMethod looks up a field or method with given package and name
+// in T and returns the corresponding *Var or *Func, an index sequence, and a
+// bool indicating if there were any pointer indirections on the path to the
+// field or method. If addressable is set, T is the type of an addressable
+// variable (only matters for method lookups).
+//
+// The last index entry is the field or method index in the (possibly embedded)
+// type where the entry was found, either:
+//
+// 1) the list of declared methods of a named type; or
+// 2) the list of all methods (method set) of an interface type; or
+// 3) the list of fields of a struct type.
+//
+// The earlier index entries are the indices of the anonymous struct fields
+// traversed to get to the found entry, starting at depth 0.
+//
+// If no entry is found, a nil object is returned. In this case, the returned
+// index and indirect values have the following meaning:
+//
+// - If index != nil, the index sequence points to an ambiguous entry
+// (the same name appeared more than once at the same embedding level).
+//
+// - If indirect is set, a method with a pointer receiver type was found
+// but there was no pointer on the path from the actual receiver type to
+// the method's formal receiver base type, nor was the receiver addressable.
+//
+func LookupFieldOrMethod(T Type, addressable bool, pkg *Package, name string) (obj Object, index []int, indirect bool) {
+ // Methods cannot be associated with a named pointer type
+ // (spec: "The type denoted by T is called the receiver base type;
+ // it must not be a pointer or interface type and it must be declared
+ // in the same package as the method.").
+ // Thus, if we have a named pointer type, proceed with the underlying
+ // pointer type but discard the result if it is a method since we would
+ // not have found it for T (see also issue 8590).
+ if t, _ := T.(*Named); t != nil {
+ if p, _ := t.underlying.(*Pointer); p != nil {
+ obj, index, indirect = lookupFieldOrMethod(p, false, pkg, name)
+ if _, ok := obj.(*Func); ok {
+ return nil, nil, false
+ }
+ return
+ }
+ }
+
+ return lookupFieldOrMethod(T, addressable, pkg, name)
+}
+
+// TODO(gri) The named type consolidation and seen maps below must be
+// indexed by unique keys for a given type. Verify that named
+// types always have only one representation (even when imported
+// indirectly via different packages.)
+
+func lookupFieldOrMethod(T Type, addressable bool, pkg *Package, name string) (obj Object, index []int, indirect bool) {
+ // WARNING: The code in this function is extremely subtle - do not modify casually!
+ // This function and NewMethodSet should be kept in sync.
+
+ if name == "_" {
+ return // blank fields/methods are never found
+ }
+
+ typ, isPtr := deref(T)
+ named, _ := typ.(*Named)
+
+ // *typ where typ is an interface has no methods.
+ if isPtr {
+ utyp := typ
+ if named != nil {
+ utyp = named.underlying
+ }
+ if _, ok := utyp.(*Interface); ok {
+ return
+ }
+ }
+
+ // Start with typ as single entry at shallowest depth.
+ // If typ is not a named type, insert a nil type instead.
+ current := []embeddedType{{named, nil, isPtr, false}}
+
+ // named types that we have seen already, allocated lazily
+ var seen map[*Named]bool
+
+ // search current depth
+ for len(current) > 0 {
+ var next []embeddedType // embedded types found at current depth
+
+ // look for (pkg, name) in all types at current depth
+ for _, e := range current {
+ // The very first time only, e.typ may be nil.
+ // In this case, we don't have a named type and
+ // we simply continue with the underlying type.
+ if e.typ != nil {
+ if seen[e.typ] {
+ // We have seen this type before, at a more shallow depth
+ // (note that multiples of this type at the current depth
+ // were consolidated before). The type at that depth shadows
+ // this same type at the current depth, so we can ignore
+ // this one.
+ continue
+ }
+ if seen == nil {
+ seen = make(map[*Named]bool)
+ }
+ seen[e.typ] = true
+
+ // look for a matching attached method
+ if i, m := lookupMethod(e.typ.methods, pkg, name); m != nil {
+ // potential match
+ assert(m.typ != nil)
+ index = concat(e.index, i)
+ if obj != nil || e.multiples {
+ return nil, index, false // collision
+ }
+ obj = m
+ indirect = e.indirect
+ continue // we can't have a matching field or interface method
+ }
+
+ // continue with underlying type
+ typ = e.typ.underlying
+ }
+
+ switch t := typ.(type) {
+ case *Struct:
+ // look for a matching field and collect embedded types
+ for i, f := range t.fields {
+ if f.sameId(pkg, name) {
+ assert(f.typ != nil)
+ index = concat(e.index, i)
+ if obj != nil || e.multiples {
+ return nil, index, false // collision
+ }
+ obj = f
+ indirect = e.indirect
+ continue // we can't have a matching interface method
+ }
+ // Collect embedded struct fields for searching the next
+ // lower depth, but only if we have not seen a match yet
+ // (if we have a match it is either the desired field or
+ // we have a name collision on the same depth; in either
+ // case we don't need to look further).
+ // Embedded fields are always of the form T or *T where
+ // T is a named type. If e.typ appeared multiple times at
+ // this depth, f.typ appears multiple times at the next
+ // depth.
+ if obj == nil && f.anonymous {
+ // Ignore embedded basic types - only user-defined
+ // named types can have methods or struct fields.
+ typ, isPtr := deref(f.typ)
+ if t, _ := typ.(*Named); t != nil {
+ next = append(next, embeddedType{t, concat(e.index, i), e.indirect || isPtr, e.multiples})
+ }
+ }
+ }
+
+ case *Interface:
+ // look for a matching method
+ // TODO(gri) t.allMethods is sorted - use binary search
+ if i, m := lookupMethod(t.allMethods, pkg, name); m != nil {
+ assert(m.typ != nil)
+ index = concat(e.index, i)
+ if obj != nil || e.multiples {
+ return nil, index, false // collision
+ }
+ obj = m
+ indirect = e.indirect
+ }
+ }
+ }
+
+ if obj != nil {
+ // found a potential match
+ // spec: "A method call x.m() is valid if the method set of (the type of) x
+ // contains m and the argument list can be assigned to the parameter
+ // list of m. If x is addressable and &x's method set contains m, x.m()
+ // is shorthand for (&x).m()".
+ if f, _ := obj.(*Func); f != nil && ptrRecv(f) && !indirect && !addressable {
+ return nil, nil, true // pointer/addressable receiver required
+ }
+ return
+ }
+
+ current = consolidateMultiples(next)
+ }
+
+ return nil, nil, false // not found
+}
+
+// embeddedType represents an embedded named type
+type embeddedType struct {
+ typ *Named // nil means use the outer typ variable instead
+ index []int // embedded field indices, starting with index at depth 0
+ indirect bool // if set, there was a pointer indirection on the path to this field
+ multiples bool // if set, typ appears multiple times at this depth
+}
+
+// consolidateMultiples collects multiple list entries with the same type
+// into a single entry marked as containing multiples. The result is the
+// consolidated list.
+func consolidateMultiples(list []embeddedType) []embeddedType {
+ if len(list) <= 1 {
+ return list // at most one entry - nothing to do
+ }
+
+ n := 0 // number of entries w/ unique type
+ prev := make(map[*Named]int) // index at which type was previously seen
+ for _, e := range list {
+ if i, found := prev[e.typ]; found {
+ list[i].multiples = true
+ // ignore this entry
+ } else {
+ prev[e.typ] = n
+ list[n] = e
+ n++
+ }
+ }
+ return list[:n]
+}
+
+// MissingMethod returns (nil, false) if V implements T, otherwise it
+// returns a missing method required by T and whether it is missing or
+// just has the wrong type.
+//
+// For non-interface types V, or if static is set, V implements T if all
+// methods of T are present in V. Otherwise (V is an interface and static
+// is not set), MissingMethod only checks that methods of T which are also
+// present in V have matching types (e.g., for a type assertion x.(T) where
+// x is of interface type V).
+//
+func MissingMethod(V Type, T *Interface, static bool) (method *Func, wrongType bool) {
+ // fast path for common case
+ if T.Empty() {
+ return
+ }
+
+ // TODO(gri) Consider using method sets here. Might be more efficient.
+
+ if ityp, _ := V.Underlying().(*Interface); ityp != nil {
+ // TODO(gri) allMethods is sorted - can do this more efficiently
+ for _, m := range T.allMethods {
+ _, obj := lookupMethod(ityp.allMethods, m.pkg, m.name)
+ switch {
+ case obj == nil:
+ if static {
+ return m, false
+ }
+ case !Identical(obj.Type(), m.typ):
+ return m, true
+ }
+ }
+ return
+ }
+
+ // A concrete type implements T if it implements all methods of T.
+ for _, m := range T.allMethods {
+ obj, _, _ := lookupFieldOrMethod(V, false, m.pkg, m.name)
+
+ f, _ := obj.(*Func)
+ if f == nil {
+ return m, false
+ }
+
+ if !Identical(f.typ, m.typ) {
+ return m, true
+ }
+ }
+
+ return
+}
+
+// assertableTo reports whether a value of type V can be asserted to have type T.
+// It returns (nil, false) as an affirmative answer. Otherwise it returns a missing
+// method required by V and whether it is missing or just has the wrong type.
+func assertableTo(V *Interface, T Type) (method *Func, wrongType bool) {
+ // no static check is required if T is an interface
+ // spec: "If T is an interface type, x.(T) asserts that the
+ // dynamic type of x implements the interface T."
+ if _, ok := T.Underlying().(*Interface); ok && !strict {
+ return
+ }
+ return MissingMethod(T, V, false)
+}
+
+// deref dereferences typ if it is a *Pointer and returns its base and true.
+// Otherwise it returns (typ, false).
+func deref(typ Type) (Type, bool) {
+ if p, _ := typ.(*Pointer); p != nil {
+ return p.base, true
+ }
+ return typ, false
+}
+
+// derefStructPtr dereferences typ if it is a (named or unnamed) pointer to a
+// (named or unnamed) struct and returns its base. Otherwise it returns typ.
+func derefStructPtr(typ Type) Type {
+ if p, _ := typ.Underlying().(*Pointer); p != nil {
+ if _, ok := p.base.Underlying().(*Struct); ok {
+ return p.base
+ }
+ }
+ return typ
+}
+
+// concat returns the result of concatenating list and i.
+// The result does not share its underlying array with list.
+func concat(list []int, i int) []int {
+ var t []int
+ t = append(t, list...)
+ return append(t, i)
+}
+
+// fieldIndex returns the index for the field with matching package and name, or a value < 0.
+func fieldIndex(fields []*Var, pkg *Package, name string) int {
+ if name != "_" {
+ for i, f := range fields {
+ if f.sameId(pkg, name) {
+ return i
+ }
+ }
+ }
+ return -1
+}
+
+// lookupMethod returns the index of, and the method with, a matching package and name, or (-1, nil).
+func lookupMethod(methods []*Func, pkg *Package, name string) (int, *Func) {
+ if name != "_" {
+ for i, m := range methods {
+ if m.sameId(pkg, name) {
+ return i, m
+ }
+ }
+ }
+ return -1, nil
+}
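For reference, a minimal sketch (not part of the patch) of how a client calls LookupFieldOrMethod. It uses the standard library's go/types, which descends from this vendored package and exports the same signature; only the import path differs from the code above.

package main

import (
    "fmt"
    "go/ast"
    "go/parser"
    "go/token"
    "go/types"
)

func main() {
    const src = `package p
type Base struct{ ID int }
type T struct{ Base }
func (*T) Close() error { return nil }`

    fset := token.NewFileSet()
    f, err := parser.ParseFile(fset, "p.go", src, 0)
    if err != nil {
        panic(err)
    }
    pkg, err := new(types.Config).Check("p", fset, []*ast.File{f}, nil)
    if err != nil {
        panic(err)
    }
    T := pkg.Scope().Lookup("T").Type()

    // Promoted field: reached through the embedded Base, so the index sequence is [0 0].
    obj, index, _ := types.LookupFieldOrMethod(T, true, pkg, "ID")
    fmt.Println(obj, index)

    // Pointer-receiver method: found here because addressable is true.
    obj, index, indirect := types.LookupFieldOrMethod(T, true, pkg, "Close")
    fmt.Println(obj, index, indirect)
}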
diff --git a/llgo/third_party/go.tools/go/types/methodset.go b/llgo/third_party/go.tools/go/types/methodset.go
new file mode 100644
index 0000000000000000000000000000000000000000..8aff6f9ba4fc8e7ccaef483ddb19f3c2cb664f30
--- /dev/null
+++ b/llgo/third_party/go.tools/go/types/methodset.go
@@ -0,0 +1,271 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements method sets.
+
+package types
+
+import (
+ "bytes"
+ "fmt"
+ "sort"
+)
+
+// A MethodSet is an ordered set of concrete or abstract (interface) methods;
+// a method is a MethodVal selection, and they are ordered by ascending m.Obj().Id().
+// The zero value for a MethodSet is a ready-to-use empty method set.
+type MethodSet struct {
+ list []*Selection
+}
+
+func (s *MethodSet) String() string {
+ if s.Len() == 0 {
+ return "MethodSet {}"
+ }
+
+ var buf bytes.Buffer
+ fmt.Fprintln(&buf, "MethodSet {")
+ for _, f := range s.list {
+ fmt.Fprintf(&buf, "\t%s\n", f)
+ }
+ fmt.Fprintln(&buf, "}")
+ return buf.String()
+}
+
+// Len returns the number of methods in s.
+func (s *MethodSet) Len() int { return len(s.list) }
+
+// At returns the i'th method in s for 0 <= i < s.Len().
+func (s *MethodSet) At(i int) *Selection { return s.list[i] }
+
+// Lookup returns the method with matching package and name, or nil if not found.
+func (s *MethodSet) Lookup(pkg *Package, name string) *Selection {
+ if s.Len() == 0 {
+ return nil
+ }
+
+ key := Id(pkg, name)
+ i := sort.Search(len(s.list), func(i int) bool {
+ m := s.list[i]
+ return m.obj.Id() >= key
+ })
+ if i < len(s.list) {
+ m := s.list[i]
+ if m.obj.Id() == key {
+ return m
+ }
+ }
+ return nil
+}
+
+// Shared empty method set.
+var emptyMethodSet MethodSet
+
+// NewMethodSet returns the method set for the given type T. It
+// always returns a non-nil method set, even if it is empty.
+//
+// A MethodSetCache handles repeat queries more efficiently.
+//
+func NewMethodSet(T Type) *MethodSet {
+ // WARNING: The code in this function is extremely subtle - do not modify casually!
+ // This function and lookupFieldOrMethod should be kept in sync.
+
+ // method set up to the current depth, allocated lazily
+ var base methodSet
+
+ typ, isPtr := deref(T)
+ named, _ := typ.(*Named)
+
+ // *typ where typ is an interface has no methods.
+ if isPtr {
+ utyp := typ
+ if named != nil {
+ utyp = named.underlying
+ }
+ if _, ok := utyp.(*Interface); ok {
+ return &emptyMethodSet
+ }
+ }
+
+ // Start with typ as single entry at shallowest depth.
+ // If typ is not a named type, insert a nil type instead.
+ current := []embeddedType{{named, nil, isPtr, false}}
+
+ // named types that we have seen already, allocated lazily
+ var seen map[*Named]bool
+
+ // collect methods at current depth
+ for len(current) > 0 {
+ var next []embeddedType // embedded types found at current depth
+
+ // field and method sets at current depth, allocated lazily
+ var fset fieldSet
+ var mset methodSet
+
+ for _, e := range current {
+ // The very first time only, e.typ may be nil.
+ // In this case, we don't have a named type and
+ // we simply continue with the underlying type.
+ if e.typ != nil {
+ if seen[e.typ] {
+ // We have seen this type before, at a more shallow depth
+ // (note that multiples of this type at the current depth
+ // were consolidated before). The type at that depth shadows
+ // this same type at the current depth, so we can ignore
+ // this one.
+ continue
+ }
+ if seen == nil {
+ seen = make(map[*Named]bool)
+ }
+ seen[e.typ] = true
+
+ mset = mset.add(e.typ.methods, e.index, e.indirect, e.multiples)
+
+ // continue with underlying type
+ typ = e.typ.underlying
+ }
+
+ switch t := typ.(type) {
+ case *Struct:
+ for i, f := range t.fields {
+ fset = fset.add(f, e.multiples)
+
+ // Embedded fields are always of the form T or *T where
+ // T is a named type. If typ appeared multiple times at
+ // this depth, f.Type appears multiple times at the next
+ // depth.
+ if f.anonymous {
+ // Ignore embedded basic types - only user-defined
+ // named types can have methods or struct fields.
+ typ, isPtr := deref(f.typ)
+ if t, _ := typ.(*Named); t != nil {
+ next = append(next, embeddedType{t, concat(e.index, i), e.indirect || isPtr, e.multiples})
+ }
+ }
+ }
+
+ case *Interface:
+ mset = mset.add(t.allMethods, e.index, true, e.multiples)
+ }
+ }
+
+ // Add methods and collisions at this depth to base if no entries with matching
+ // names exist already.
+ for k, m := range mset {
+ if _, found := base[k]; !found {
+ // Fields collide with methods of the same name at this depth.
+ if _, found := fset[k]; found {
+ m = nil // collision
+ }
+ if base == nil {
+ base = make(methodSet)
+ }
+ base[k] = m
+ }
+ }
+
+ // Multiple fields with matching names collide at this depth and shadow all
+ // entries further down; add them as collisions to base if no entries with
+ // matching names exist already.
+ for k, f := range fset {
+ if f == nil {
+ if _, found := base[k]; !found {
+ if base == nil {
+ base = make(methodSet)
+ }
+ base[k] = nil // collision
+ }
+ }
+ }
+
+ current = consolidateMultiples(next)
+ }
+
+ if len(base) == 0 {
+ return &emptyMethodSet
+ }
+
+ // collect methods
+ var list []*Selection
+ for _, m := range base {
+ if m != nil {
+ m.recv = T
+ list = append(list, m)
+ }
+ }
+ sort.Sort(byUniqueName(list))
+ return &MethodSet{list}
+}
+
+// A fieldSet is a set of fields and name collisions.
+// A collision indicates that multiple fields with the
+// same unique id appeared.
+type fieldSet map[string]*Var // a nil entry indicates a name collision
+
+// add adds field f to the field set s.
+// If multiples is set, f appears multiple times
+// and is treated as a collision.
+func (s fieldSet) add(f *Var, multiples bool) fieldSet {
+ if s == nil {
+ s = make(fieldSet)
+ }
+ key := f.Id()
+ // if f is not in the set, add it
+ if !multiples {
+ if _, found := s[key]; !found {
+ s[key] = f
+ return s
+ }
+ }
+ s[key] = nil // collision
+ return s
+}
+
+// A methodSet is a set of methods and name collisions.
+// A collision indicates that multiple methods with the
+// same unique id appeared.
+type methodSet map[string]*Selection // a nil entry indicates a name collision
+
+// add adds all functions in list to the method set s.
+// If multiples is set, every function in list appears multiple times
+// and is treated as a collision.
+func (s methodSet) add(list []*Func, index []int, indirect bool, multiples bool) methodSet {
+ if len(list) == 0 {
+ return s
+ }
+ if s == nil {
+ s = make(methodSet)
+ }
+ for i, f := range list {
+ key := f.Id()
+ // if f is not in the set, add it
+ if !multiples {
+ // TODO(gri) A found method may not be added because it's not in the method set
+ // (!indirect && ptrRecv(f)). A 2nd method on the same level may be in the method
+ // set and may not collide with the first one, thus leading to a false positive.
+ // Is that possible? Investigate.
+ if _, found := s[key]; !found && (indirect || !ptrRecv(f)) {
+ s[key] = &Selection{MethodVal, nil, f, concat(index, i), indirect}
+ continue
+ }
+ }
+ s[key] = nil // collision
+ }
+ return s
+}
+
+// ptrRecv reports whether the receiver is of the form *T.
+// The receiver must exist.
+func ptrRecv(f *Func) bool {
+ _, isPtr := deref(f.typ.(*Signature).recv.typ)
+ return isPtr
+}
+
+// byUniqueName function lists can be sorted by their unique names.
+type byUniqueName []*Selection
+
+func (a byUniqueName) Len() int { return len(a) }
+func (a byUniqueName) Less(i, j int) bool { return a[i].obj.Id() < a[j].obj.Id() }
+func (a byUniqueName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
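A small usage sketch (again via the standard library's go/types, not part of the patch) of the distinction NewMethodSet draws between T and *T: pointer-receiver methods appear only in the pointer's method set.

package main

import (
    "fmt"
    "go/ast"
    "go/parser"
    "go/token"
    "go/types"
)

func main() {
    const src = `package p
type T struct{}
func (T) Value()    {}
func (*T) Pointer() {}`

    fset := token.NewFileSet()
    f, err := parser.ParseFile(fset, "p.go", src, 0)
    if err != nil {
        panic(err)
    }
    pkg, err := new(types.Config).Check("p", fset, []*ast.File{f}, nil)
    if err != nil {
        panic(err)
    }
    T := pkg.Scope().Lookup("T").Type()

    fmt.Println(types.NewMethodSet(T))                    // Value only
    fmt.Println(types.NewMethodSet(types.NewPointer(T)))  // Pointer and Value
}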
diff --git a/llgo/third_party/go.tools/go/types/methodsetcache.go b/llgo/third_party/go.tools/go/types/methodsetcache.go
new file mode 100644
index 0000000000000000000000000000000000000000..5a482e952a4a52d5ad938b4a5ebf54fd9a9ea911
--- /dev/null
+++ b/llgo/third_party/go.tools/go/types/methodsetcache.go
@@ -0,0 +1,69 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements a cache of method sets.
+
+package types
+
+import "sync"
+
+// A MethodSetCache records the method set of each type T for which
+// MethodSet(T) is called so that repeat queries are fast.
+// The zero value is a ready-to-use cache instance.
+type MethodSetCache struct {
+ mu sync.Mutex
+ named map[*Named]struct{ value, pointer *MethodSet } // method sets for named N and *N
+ others map[Type]*MethodSet // all other types
+}
+
+// MethodSet returns the method set of type T. It is thread-safe.
+//
+// If cache is nil, this function is equivalent to NewMethodSet(T).
+// Utility functions can thus expose an optional *MethodSetCache
+// parameter to clients that care about performance.
+//
+func (cache *MethodSetCache) MethodSet(T Type) *MethodSet {
+ if cache == nil {
+ return NewMethodSet(T)
+ }
+ cache.mu.Lock()
+ defer cache.mu.Unlock()
+
+ switch T := T.(type) {
+ case *Named:
+ return cache.lookupNamed(T).value
+
+ case *Pointer:
+ if N, ok := T.Elem().(*Named); ok {
+ return cache.lookupNamed(N).pointer
+ }
+ }
+
+ // all other types
+ // (The map uses pointer equivalence, not type identity.)
+ mset := cache.others[T]
+ if mset == nil {
+ mset = NewMethodSet(T)
+ if cache.others == nil {
+ cache.others = make(map[Type]*MethodSet)
+ }
+ cache.others[T] = mset
+ }
+ return mset
+}
+
+func (cache *MethodSetCache) lookupNamed(named *Named) struct{ value, pointer *MethodSet } {
+ if cache.named == nil {
+ cache.named = make(map[*Named]struct{ value, pointer *MethodSet })
+ }
+ // Avoid recomputing mset(*T) for each distinct Pointer
+ // instance whose underlying type is a named type.
+ msets, ok := cache.named[named]
+ if !ok {
+ msets.value = NewMethodSet(named)
+ msets.pointer = NewMethodSet(NewPointer(named))
+ cache.named[named] = msets
+ }
+ return msets
+}
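The same cache lives on today as MethodSetCache in golang.org/x/tools/go/types/typeutil. The sketch below shows the zero-value-ready usage the comments above describe; the typeutil import path is an assumption about the reader's toolchain, not part of this patch.

package main

import (
    "fmt"
    "go/types"

    "golang.org/x/tools/go/types/typeutil"
)

func main() {
    var cache typeutil.MethodSetCache // zero value is ready to use

    errType := types.Universe.Lookup("error").Type()
    fmt.Println(cache.MethodSet(errType)) // computed on the first call
    fmt.Println(cache.MethodSet(errType)) // served from the cache thereafter
}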
diff --git a/llgo/third_party/go.tools/go/types/object.go b/llgo/third_party/go.tools/go/types/object.go
new file mode 100644
index 0000000000000000000000000000000000000000..e153cd04d3ef5c33507d3e33539e2b7ca19d25a2
--- /dev/null
+++ b/llgo/third_party/go.tools/go/types/object.go
@@ -0,0 +1,340 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+import (
+ "bytes"
+ "fmt"
+ "go/ast"
+ "go/token"
+
+ "llvm.org/llgo/third_party/go.tools/go/exact"
+)
+
+// TODO(gri) Document factory, accessor methods, and fields. General clean-up.
+
+// An Object describes a named language entity such as a package,
+// constant, type, variable, function (incl. methods), or label.
+// All objects implement the Object interface.
+//
+type Object interface {
+ Parent() *Scope // scope in which this object is declared
+ Pos() token.Pos // position of object identifier in declaration
+ Pkg() *Package // nil for objects in the Universe scope and labels
+ Name() string // package local object name
+ Type() Type // object type
+ Exported() bool // reports whether the name starts with a capital letter
+ Id() string // object id (see Id below)
+
+ // String returns a human-readable string of the object.
+ String() string
+
+ // order reflects a package-level object's source order: if object
+ // a is before object b in the source, then a.order() < b.order().
+ // order returns a value > 0 for package-level objects; it returns
+ // 0 for all other objects (including objects in file scopes).
+ order() uint32
+
+ // setOrder sets the order number of the object. It must be > 0.
+ setOrder(uint32)
+
+ // setParent sets the parent scope of the object.
+ setParent(*Scope)
+
+ // sameId reports whether obj.Id() and Id(pkg, name) are the same.
+ sameId(pkg *Package, name string) bool
+}
+
+// Id returns name if it is exported, otherwise it
+// returns the name qualified with the package path.
+func Id(pkg *Package, name string) string {
+ if ast.IsExported(name) {
+ return name
+ }
+ // unexported names need the package path for differentiation
+ // (if there's no package, make sure we don't start with '.'
+ // as that may change the order of methods between a setup
+ // inside a package and outside a package - which breaks some
+ // tests)
+ path := "_"
+ // TODO(gri): shouldn't !ast.IsExported(name) => pkg != nil be a precondition?
+ // if pkg == nil {
+ // panic("nil package in lookup of unexported name")
+ // }
+ if pkg != nil {
+ path = pkg.path
+ if path == "" {
+ path = "_"
+ }
+ }
+ return path + "." + name
+}
+
+// An object implements the common parts of an Object.
+type object struct {
+ parent *Scope
+ pos token.Pos
+ pkg *Package
+ name string
+ typ Type
+ order_ uint32
+}
+
+func (obj *object) Parent() *Scope { return obj.parent }
+func (obj *object) Pos() token.Pos { return obj.pos }
+func (obj *object) Pkg() *Package { return obj.pkg }
+func (obj *object) Name() string { return obj.name }
+func (obj *object) Type() Type { return obj.typ }
+func (obj *object) Exported() bool { return ast.IsExported(obj.name) }
+func (obj *object) Id() string { return Id(obj.pkg, obj.name) }
+func (obj *object) String() string { panic("abstract") }
+func (obj *object) order() uint32 { return obj.order_ }
+
+func (obj *object) setOrder(order uint32) { assert(order > 0); obj.order_ = order }
+func (obj *object) setParent(parent *Scope) { obj.parent = parent }
+
+func (obj *object) sameId(pkg *Package, name string) bool {
+ // spec:
+ // "Two identifiers are different if they are spelled differently,
+ // or if they appear in different packages and are not exported.
+ // Otherwise, they are the same."
+ if name != obj.name {
+ return false
+ }
+ // obj.Name == name
+ if obj.Exported() {
+ return true
+ }
+ // not exported, so packages must be the same (pkg == nil for
+ // fields in Universe scope; this can only happen for types
+ // introduced via Eval)
+ if pkg == nil || obj.pkg == nil {
+ return pkg == obj.pkg
+ }
+ // pkg != nil && obj.pkg != nil
+ return pkg.path == obj.pkg.path
+}
+
+// A PkgName represents an imported Go package.
+type PkgName struct {
+ object
+ imported *Package
+ used bool // set if the package was used
+}
+
+func NewPkgName(pos token.Pos, pkg *Package, name string, imported *Package) *PkgName {
+ return &PkgName{object{nil, pos, pkg, name, Typ[Invalid], 0}, imported, false}
+}
+
+// Imported returns the package that was imported.
+// It is distinct from Pkg(), which is the package containing the import statement.
+func (obj *PkgName) Imported() *Package { return obj.imported }
+
+// A Const represents a declared constant.
+type Const struct {
+ object
+ val exact.Value
+ visited bool // for initialization cycle detection
+}
+
+func NewConst(pos token.Pos, pkg *Package, name string, typ Type, val exact.Value) *Const {
+ return &Const{object{nil, pos, pkg, name, typ, 0}, val, false}
+}
+
+func (obj *Const) Val() exact.Value { return obj.val }
+
+// A TypeName represents a declared type.
+type TypeName struct {
+ object
+}
+
+func NewTypeName(pos token.Pos, pkg *Package, name string, typ Type) *TypeName {
+ return &TypeName{object{nil, pos, pkg, name, typ, 0}}
+}
+
+// A Variable represents a declared variable (including function parameters and results, and struct fields).
+type Var struct {
+ object
+ anonymous bool // if set, the variable is an anonymous struct field, and name is the type name
+ visited bool // for initialization cycle detection
+ isField bool // var is struct field
+ used bool // set if the variable was used
+}
+
+func NewVar(pos token.Pos, pkg *Package, name string, typ Type) *Var {
+ return &Var{object: object{nil, pos, pkg, name, typ, 0}}
+}
+
+func NewParam(pos token.Pos, pkg *Package, name string, typ Type) *Var {
+ return &Var{object: object{nil, pos, pkg, name, typ, 0}, used: true} // parameters are always 'used'
+}
+
+func NewField(pos token.Pos, pkg *Package, name string, typ Type, anonymous bool) *Var {
+ return &Var{object: object{nil, pos, pkg, name, typ, 0}, anonymous: anonymous, isField: true}
+}
+
+func (obj *Var) Anonymous() bool { return obj.anonymous }
+
+func (obj *Var) IsField() bool { return obj.isField }
+
+// A Func represents a declared function, concrete method, or abstract
+// (interface) method. Its Type() is always a *Signature.
+// An abstract method may belong to many interfaces due to embedding.
+type Func struct {
+ object
+}
+
+func NewFunc(pos token.Pos, pkg *Package, name string, sig *Signature) *Func {
+ // don't store a nil signature
+ var typ Type
+ if sig != nil {
+ typ = sig
+ }
+ return &Func{object{nil, pos, pkg, name, typ, 0}}
+}
+
+// FullName returns the package- or receiver-type-qualified name of
+// function or method obj.
+func (obj *Func) FullName() string {
+ var buf bytes.Buffer
+ writeFuncName(&buf, nil, obj)
+ return buf.String()
+}
+
+func (obj *Func) Scope() *Scope {
+ return obj.typ.(*Signature).scope
+}
+
+// A Label represents a declared label.
+type Label struct {
+ object
+ used bool // set if the label was used
+}
+
+func NewLabel(pos token.Pos, pkg *Package, name string) *Label {
+ return &Label{object{pos: pos, pkg: pkg, name: name, typ: Typ[Invalid]}, false}
+}
+
+// A Builtin represents a built-in function.
+// Builtins don't have a valid type.
+type Builtin struct {
+ object
+ id builtinId
+}
+
+func newBuiltin(id builtinId) *Builtin {
+ return &Builtin{object{name: predeclaredFuncs[id].name, typ: Typ[Invalid]}, id}
+}
+
+// Nil represents the predeclared value nil.
+type Nil struct {
+ object
+}
+
+func writeObject(buf *bytes.Buffer, this *Package, obj Object) {
+ typ := obj.Type()
+ switch obj := obj.(type) {
+ case *PkgName:
+ fmt.Fprintf(buf, "package %s", obj.Name())
+ if path := obj.imported.path; path != "" && path != obj.name {
+ fmt.Fprintf(buf, " (%q)", path)
+ }
+ return
+
+ case *Const:
+ buf.WriteString("const")
+
+ case *TypeName:
+ buf.WriteString("type")
+ typ = typ.Underlying()
+
+ case *Var:
+ if obj.isField {
+ buf.WriteString("field")
+ } else {
+ buf.WriteString("var")
+ }
+
+ case *Func:
+ buf.WriteString("func ")
+ writeFuncName(buf, this, obj)
+ if typ != nil {
+ WriteSignature(buf, this, typ.(*Signature))
+ }
+ return
+
+ case *Label:
+ buf.WriteString("label")
+ typ = nil
+
+ case *Builtin:
+ buf.WriteString("builtin")
+ typ = nil
+
+ case *Nil:
+ buf.WriteString("nil")
+ return
+
+ default:
+ panic(fmt.Sprintf("writeObject(%T)", obj))
+ }
+
+ buf.WriteByte(' ')
+
+ // For package-level objects, package-qualify the name,
+ // except for intra-package references (this != nil).
+ if pkg := obj.Pkg(); pkg != nil && this != pkg && pkg.scope.Lookup(obj.Name()) == obj {
+ buf.WriteString(pkg.path)
+ buf.WriteByte('.')
+ }
+ buf.WriteString(obj.Name())
+ if typ != nil {
+ buf.WriteByte(' ')
+ WriteType(buf, this, typ)
+ }
+}
+
+// ObjectString returns the string form of obj.
+// Object and type names are printed package-qualified
+// only if they do not belong to this package.
+//
+func ObjectString(this *Package, obj Object) string {
+ var buf bytes.Buffer
+ writeObject(&buf, this, obj)
+ return buf.String()
+}
+
+func (obj *PkgName) String() string { return ObjectString(nil, obj) }
+func (obj *Const) String() string { return ObjectString(nil, obj) }
+func (obj *TypeName) String() string { return ObjectString(nil, obj) }
+func (obj *Var) String() string { return ObjectString(nil, obj) }
+func (obj *Func) String() string { return ObjectString(nil, obj) }
+func (obj *Label) String() string { return ObjectString(nil, obj) }
+func (obj *Builtin) String() string { return ObjectString(nil, obj) }
+func (obj *Nil) String() string { return ObjectString(nil, obj) }
+
+func writeFuncName(buf *bytes.Buffer, this *Package, f *Func) {
+ if f.typ != nil {
+ sig := f.typ.(*Signature)
+ if recv := sig.Recv(); recv != nil {
+ buf.WriteByte('(')
+ if _, ok := recv.Type().(*Interface); ok {
+ // gcimporter creates abstract methods of
+ // named interfaces using the interface type
+ // (not the named type) as the receiver.
+ // Don't print it in full.
+ buf.WriteString("interface")
+ } else {
+ WriteType(buf, this, recv.Type())
+ }
+ buf.WriteByte(')')
+ buf.WriteByte('.')
+ } else if f.pkg != nil && f.pkg != this {
+ buf.WriteString(f.pkg.path)
+ buf.WriteByte('.')
+ }
+ }
+ buf.WriteString(f.name)
+}
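A short sketch of the Id rules above, using the standard library's go/types (whose Id and NewPackage have the same signatures): exported names stand alone, unexported names are qualified by the package path, and a missing package falls back to "_".

package main

import (
    "fmt"
    "go/types"
)

func main() {
    pkg := types.NewPackage("example.com/p", "p")
    fmt.Println(types.Id(pkg, "Exported"))   // "Exported"
    fmt.Println(types.Id(pkg, "unexported")) // "example.com/p.unexported"
    fmt.Println(types.Id(nil, "unexported")) // "_.unexported" (no package)
}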
diff --git a/llgo/third_party/go.tools/go/types/objset.go b/llgo/third_party/go.tools/go/types/objset.go
new file mode 100644
index 0000000000000000000000000000000000000000..55eb74addbae5e69fa19d15f9bd7966e793631b3
--- /dev/null
+++ b/llgo/third_party/go.tools/go/types/objset.go
@@ -0,0 +1,31 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements objsets.
+//
+// An objset is similar to a Scope but objset elements
+// are identified by their unique id, instead of their
+// object name.
+
+package types
+
+// An objset is a set of objects identified by their unique id.
+// The zero value for objset is a ready-to-use empty objset.
+type objset map[string]Object // initialized lazily
+
+// insert attempts to insert an object obj into objset s.
+// If s already contains an alternative object alt with
+// the same name, insert leaves s unchanged and returns alt.
+// Otherwise it inserts obj and returns nil.
+func (s *objset) insert(obj Object) Object {
+ id := obj.Id()
+ if alt := (*s)[id]; alt != nil {
+ return alt
+ }
+ if *s == nil {
+ *s = make(map[string]Object)
+ }
+ (*s)[id] = obj
+ return nil
+}
diff --git a/llgo/third_party/go.tools/go/types/operand.go b/llgo/third_party/go.tools/go/types/operand.go
new file mode 100644
index 0000000000000000000000000000000000000000..44a2d1d2632775cade69d5b47cace4dfeeb76e05
--- /dev/null
+++ b/llgo/third_party/go.tools/go/types/operand.go
@@ -0,0 +1,287 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file defines operands and associated operations.
+
+package types
+
+import (
+ "bytes"
+ "go/ast"
+ "go/token"
+
+ "llvm.org/llgo/third_party/go.tools/go/exact"
+)
+
+// An operandMode specifies the (addressing) mode of an operand.
+type operandMode byte
+
+const (
+ invalid operandMode = iota // operand is invalid
+ novalue // operand represents no value (result of a function call w/o result)
+ builtin // operand is a built-in function
+ typexpr // operand is a type
+ constant // operand is a constant; the operand's typ is a Basic type
+ variable // operand is an addressable variable
+ mapindex // operand is a map index expression (acts like a variable on lhs, commaok on rhs of an assignment)
+ value // operand is a computed value
+ commaok // like value, but operand may be used in a comma,ok expression
+)
+
+var operandModeString = [...]string{
+ invalid: "invalid operand",
+ novalue: "no value",
+ builtin: "built-in",
+ typexpr: "type",
+ constant: "constant",
+ variable: "variable",
+ mapindex: "map index expression",
+ value: "value",
+ commaok: "comma, ok expression",
+}
+
+// An operand represents an intermediate value during type checking.
+// Operands have an (addressing) mode, the expression evaluating to
+// the operand, the operand's type, a value for constants, and an id
+// for built-in functions.
+// The zero value of operand is a ready to use invalid operand.
+//
+type operand struct {
+ mode operandMode
+ expr ast.Expr
+ typ Type
+ val exact.Value
+ id builtinId
+}
+
+// pos returns the position of the expression corresponding to x.
+// If x is invalid the position is token.NoPos.
+//
+func (x *operand) pos() token.Pos {
+ // x.expr may not be set if x is invalid
+ if x.expr == nil {
+ return token.NoPos
+ }
+ return x.expr.Pos()
+}
+
+// Operand string formats
+// (not all "untyped" cases can appear due to the type system,
+// but they fall out naturally here)
+//
+// mode       format
+//
+// invalid    <expr> (               <mode>                    )
+// novalue    <expr> (               <mode>                    )
+// builtin    <expr> (               <mode>                    )
+// typexpr    <expr> (               <mode>                    )
+//
+// constant   <expr> (<untyped kind> <mode>                    )
+// constant   <expr> (               <mode>       of type <typ>)
+// constant   <expr> (<untyped kind> <mode> <val>              )
+// constant   <expr> (               <mode> <val> of type <typ>)
+//
+// variable   <expr> (<untyped kind> <mode>                    )
+// variable   <expr> (               <mode>       of type <typ>)
+//
+// mapindex   <expr> (<untyped kind> <mode>                    )
+// mapindex   <expr> (               <mode>       of type <typ>)
+//
+// value      <expr> (<untyped kind> <mode>                    )
+// value      <expr> (               <mode>       of type <typ>)
+//
+// commaok    <expr> (<untyped kind> <mode>                    )
+// commaok    <expr> (               <mode>       of type <typ>)
+//
+func operandString(this *Package, x *operand) string {
+ var buf bytes.Buffer
+
+ var expr string
+ if x.expr != nil {
+ expr = ExprString(x.expr)
+ } else {
+ switch x.mode {
+ case builtin:
+ expr = predeclaredFuncs[x.id].name
+ case typexpr:
+ expr = TypeString(this, x.typ)
+ case constant:
+ expr = x.val.String()
+ }
+ }
+
+ // <expr> (
+ if expr != "" {
+ buf.WriteString(expr)
+ buf.WriteString(" (")
+ }
+
+ // <untyped kind>
+ hasType := false
+ switch x.mode {
+ case invalid, novalue, builtin, typexpr:
+ // no type
+ default:
+ // has type
+ if isUntyped(x.typ) {
+ buf.WriteString(x.typ.(*Basic).name)
+ buf.WriteByte(' ')
+ break
+ }
+ hasType = true
+ }
+
+ // <mode>
+ buf.WriteString(operandModeString[x.mode])
+
+ // <val>
+ if x.mode == constant {
+ if s := x.val.String(); s != expr {
+ buf.WriteByte(' ')
+ buf.WriteString(s)
+ }
+ }
+
+ // <typ>
+ if hasType {
+ if x.typ != Typ[Invalid] {
+ buf.WriteString(" of type ")
+ WriteType(&buf, this, x.typ)
+ } else {
+ buf.WriteString(" with invalid type")
+ }
+ }
+
+ // )
+ if expr != "" {
+ buf.WriteByte(')')
+ }
+
+ return buf.String()
+}
+
+func (x *operand) String() string {
+ return operandString(nil, x)
+}
+
+// setConst sets x to the untyped constant for literal lit.
+func (x *operand) setConst(tok token.Token, lit string) {
+ val := exact.MakeFromLiteral(lit, tok)
+ if val == nil {
+ // TODO(gri) Should we make it an unknown constant instead?
+ x.mode = invalid
+ return
+ }
+
+ var kind BasicKind
+ switch tok {
+ case token.INT:
+ kind = UntypedInt
+ case token.FLOAT:
+ kind = UntypedFloat
+ case token.IMAG:
+ kind = UntypedComplex
+ case token.CHAR:
+ kind = UntypedRune
+ case token.STRING:
+ kind = UntypedString
+ }
+
+ x.mode = constant
+ x.typ = Typ[kind]
+ x.val = val
+}
+
+// isNil reports whether x is the nil value.
+func (x *operand) isNil() bool {
+ return x.mode == value && x.typ == Typ[UntypedNil]
+}
+
+// TODO(gri) The functions operand.assignableTo, checker.convertUntyped,
+// checker.representable, and checker.assignment are
+// overlapping in functionality. Need to simplify and clean up.
+
+// assignableTo reports whether x is assignable to a variable of type T.
+func (x *operand) assignableTo(conf *Config, T Type) bool {
+ if x.mode == invalid || T == Typ[Invalid] {
+ return true // avoid spurious errors
+ }
+
+ V := x.typ
+
+ // x's type is identical to T
+ if Identical(V, T) {
+ return true
+ }
+
+ Vu := V.Underlying()
+ Tu := T.Underlying()
+
+ // T is an interface type and x implements T
+ // (Do this check first as it might succeed early.)
+ if Ti, ok := Tu.(*Interface); ok {
+ if Implements(x.typ, Ti) {
+ return true
+ }
+ }
+
+ // x's type V and T have identical underlying types
+ // and at least one of V or T is not a named type
+ if Identical(Vu, Tu) && (!isNamed(V) || !isNamed(T)) {
+ return true
+ }
+
+ // x is a bidirectional channel value, T is a channel
+ // type, x's type V and T have identical element types,
+ // and at least one of V or T is not a named type
+ if Vc, ok := Vu.(*Chan); ok && Vc.dir == SendRecv {
+ if Tc, ok := Tu.(*Chan); ok && Identical(Vc.elem, Tc.elem) {
+ return !isNamed(V) || !isNamed(T)
+ }
+ }
+
+ // x is the predeclared identifier nil and T is a pointer,
+ // function, slice, map, channel, or interface type
+ if x.isNil() {
+ switch t := Tu.(type) {
+ case *Basic:
+ if t.kind == UnsafePointer {
+ return true
+ }
+ case *Pointer, *Signature, *Slice, *Map, *Chan, *Interface:
+ return true
+ }
+ return false
+ }
+
+ // x is an untyped constant representable by a value of type T
+ // TODO(gri) This is borrowing from checker.convertUntyped and
+ // checker.representable. Need to clean up.
+ if isUntyped(Vu) {
+ switch t := Tu.(type) {
+ case *Basic:
+ if x.mode == constant {
+ return representableConst(x.val, conf, t.kind, nil)
+ }
+ // The result of a comparison is an untyped boolean,
+ // but may not be a constant.
+ if Vb, _ := Vu.(*Basic); Vb != nil {
+ return Vb.kind == UntypedBool && isBoolean(Tu)
+ }
+ case *Interface:
+ return x.isNil() || t.Empty()
+ case *Pointer, *Signature, *Slice, *Map, *Chan:
+ return x.isNil()
+ }
+ }
+
+ return false
+}
+
+// isInteger reports whether x is a (typed or untyped) integer value.
+func (x *operand) isInteger() bool {
+ return x.mode == invalid ||
+ isInteger(x.typ) ||
+ x.mode == constant && representableConst(x.val, nil, UntypedInt, nil) // no *Config required for UntypedInt
+}
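The assignability rules that operand.assignableTo implements are exposed to clients as AssignableTo in the standard library's go/types. A minimal sketch (not part of the patch) of the named/unnamed rule quoted in the code above:

package main

import (
    "fmt"
    "go/token"
    "go/types"
)

func main() {
    intSlice := types.NewSlice(types.Typ[types.Int])
    // A named type whose underlying type is []int.
    named := types.NewNamed(types.NewTypeName(token.NoPos, nil, "MyInts", nil), intSlice, nil)

    // Identical underlying types and at least one side unnamed: assignable both ways.
    fmt.Println(types.AssignableTo(intSlice, named)) // true
    fmt.Println(types.AssignableTo(named, intSlice)) // true

    // Unrelated types are not assignable.
    fmt.Println(types.AssignableTo(named, types.Typ[types.Int])) // false
}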
diff --git a/llgo/third_party/go.tools/go/types/ordering.go b/llgo/third_party/go.tools/go/types/ordering.go
new file mode 100644
index 0000000000000000000000000000000000000000..6bb98f2dc10acb16f6e15b6e7da2616ce35ca9ca
--- /dev/null
+++ b/llgo/third_party/go.tools/go/types/ordering.go
@@ -0,0 +1,127 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements resolveOrder.
+
+package types
+
+import (
+ "go/ast"
+ "sort"
+)
+
+// resolveOrder computes the order in which package-level objects
+// must be type-checked.
+//
+// Interface types appear first in the list, sorted topologically
+// by dependencies on embedded interfaces that are also declared
+// in this package, followed by all other objects sorted in source
+// order.
+//
+// TODO(gri) Consider sorting all types by dependencies here, and
+// in the process check _and_ report type cycles. This may simplify
+// the full type-checking phase.
+//
+func (check *Checker) resolveOrder() []Object {
+ var ifaces, others []Object
+
+ // collect interface types with their dependencies, and all other objects
+ for obj := range check.objMap {
+ if ityp := check.interfaceFor(obj); ityp != nil {
+ ifaces = append(ifaces, obj)
+ // determine dependencies on embedded interfaces
+ for _, f := range ityp.Methods.List {
+ if len(f.Names) == 0 {
+ // Embedded interface: The type must be a (possibly
+ // qualified) identifier denoting another interface.
+ // Imported interfaces are already fully resolved,
+ // so we can ignore qualified identifiers.
+ if ident, _ := f.Type.(*ast.Ident); ident != nil {
+ embedded := check.pkg.scope.Lookup(ident.Name)
+ if check.interfaceFor(embedded) != nil {
+ check.objMap[obj].addDep(embedded)
+ }
+ }
+ }
+ }
+ } else {
+ others = append(others, obj)
+ }
+ }
+
+ // final object order
+ var order []Object
+
+ // sort interface types topologically by dependencies,
+ // and in source order if there are no dependencies
+ sort.Sort(inSourceOrder(ifaces))
+ if debug {
+ for _, obj := range ifaces {
+ assert(check.objMap[obj].mark == 0)
+ }
+ }
+ for _, obj := range ifaces {
+ check.appendInPostOrder(&order, obj)
+ }
+
+ // sort everything else in source order
+ sort.Sort(inSourceOrder(others))
+
+ return append(order, others...)
+}
+
+// interfaceFor returns the AST interface denoted by obj, or nil.
+func (check *Checker) interfaceFor(obj Object) *ast.InterfaceType {
+ tname, _ := obj.(*TypeName)
+ if tname == nil {
+ return nil // not a type
+ }
+ d := check.objMap[obj]
+ if d == nil {
+ check.dump("%s: %s should have been declared", obj.Pos(), obj.Name())
+ unreachable()
+ }
+ if d.typ == nil {
+ return nil // invalid AST - ignore (will be handled later)
+ }
+ ityp, _ := d.typ.(*ast.InterfaceType)
+ return ityp
+}
+
+func (check *Checker) appendInPostOrder(order *[]Object, obj Object) {
+ d := check.objMap[obj]
+ if d.mark != 0 {
+ // We've already seen this object; either because it's
+ // already added to order, or because we have a cycle.
+ // In both cases we stop. Cycle errors are reported
+ // when type-checking types.
+ return
+ }
+ d.mark = 1
+
+ for _, obj := range orderedSetObjects(d.deps) {
+ check.appendInPostOrder(order, obj)
+ }
+
+ *order = append(*order, obj)
+}
+
+func orderedSetObjects(set map[Object]bool) []Object {
+ list := make([]Object, len(set))
+ i := 0
+ for obj := range set {
+ // we don't care about the map element value
+ list[i] = obj
+ i++
+ }
+ sort.Sort(inSourceOrder(list))
+ return list
+}
+
+// inSourceOrder implements the sort.Sort interface.
+type inSourceOrder []Object
+
+func (a inSourceOrder) Len() int { return len(a) }
+func (a inSourceOrder) Less(i, j int) bool { return a[i].order() < a[j].order() }
+func (a inSourceOrder) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
diff --git a/llgo/third_party/go.tools/go/types/package.go b/llgo/third_party/go.tools/go/types/package.go
new file mode 100644
index 0000000000000000000000000000000000000000..366ca3948d63c5fe433d478b83fb759fb78ac345
--- /dev/null
+++ b/llgo/third_party/go.tools/go/types/package.go
@@ -0,0 +1,58 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+import "fmt"
+
+// A Package describes a Go package.
+type Package struct {
+ path string
+ name string
+ scope *Scope
+ complete bool
+ imports []*Package
+ fake bool // scope lookup errors are silently dropped if package is fake (internal use only)
+}
+
+// NewPackage returns a new Package for the given package path and name;
+// the name must not be the blank identifier.
+// The package is not complete and contains no explicit imports.
+func NewPackage(path, name string) *Package {
+ if name == "_" {
+ panic("invalid package name _")
+ }
+ scope := NewScope(Universe, fmt.Sprintf("package %q", path))
+ return &Package{path: path, name: name, scope: scope}
+}
+
+// Path returns the package path.
+func (pkg *Package) Path() string { return pkg.path }
+
+// Name returns the package name.
+func (pkg *Package) Name() string { return pkg.name }
+
+// Scope returns the (complete or incomplete) package scope
+// holding the objects declared at package level (TypeNames,
+// Consts, Vars, and Funcs).
+func (pkg *Package) Scope() *Scope { return pkg.scope }
+
+// A package is complete if its scope contains (at least) all
+// exported objects; otherwise it is incomplete.
+func (pkg *Package) Complete() bool { return pkg.complete }
+
+// MarkComplete marks a package as complete.
+func (pkg *Package) MarkComplete() { pkg.complete = true }
+
+// Imports returns the list of packages explicitly imported by
+// pkg; the list is in source order. Package unsafe is excluded.
+func (pkg *Package) Imports() []*Package { return pkg.imports }
+
+// SetImports sets the list of explicitly imported packages to list.
+// It is the caller's responsibility to make sure list elements are unique.
+func (pkg *Package) SetImports(list []*Package) { pkg.imports = list }
+
+func (pkg *Package) String() string {
+ return fmt.Sprintf("package %s (%q)", pkg.name, pkg.path)
+}
diff --git a/llgo/third_party/go.tools/go/types/predicates.go b/llgo/third_party/go.tools/go/types/predicates.go
new file mode 100644
index 0000000000000000000000000000000000000000..2e36a7294b3ba9c3247c69d8a92a66a8cb165277
--- /dev/null
+++ b/llgo/third_party/go.tools/go/types/predicates.go
@@ -0,0 +1,308 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements commonly used type predicates.
+
+package types
+
+import "sort"
+
+func isNamed(typ Type) bool {
+ if _, ok := typ.(*Basic); ok {
+ return ok
+ }
+ _, ok := typ.(*Named)
+ return ok
+}
+
+func isBoolean(typ Type) bool {
+ t, ok := typ.Underlying().(*Basic)
+ return ok && t.info&IsBoolean != 0
+}
+
+func isInteger(typ Type) bool {
+ t, ok := typ.Underlying().(*Basic)
+ return ok && t.info&IsInteger != 0
+}
+
+func isUnsigned(typ Type) bool {
+ t, ok := typ.Underlying().(*Basic)
+ return ok && t.info&IsUnsigned != 0
+}
+
+func isFloat(typ Type) bool {
+ t, ok := typ.Underlying().(*Basic)
+ return ok && t.info&IsFloat != 0
+}
+
+func isComplex(typ Type) bool {
+ t, ok := typ.Underlying().(*Basic)
+ return ok && t.info&IsComplex != 0
+}
+
+func isNumeric(typ Type) bool {
+ t, ok := typ.Underlying().(*Basic)
+ return ok && t.info&IsNumeric != 0
+}
+
+func isString(typ Type) bool {
+ t, ok := typ.Underlying().(*Basic)
+ return ok && t.info&IsString != 0
+}
+
+func isTyped(typ Type) bool {
+ t, ok := typ.Underlying().(*Basic)
+ return !ok || t.info&IsUntyped == 0
+}
+
+func isUntyped(typ Type) bool {
+ t, ok := typ.Underlying().(*Basic)
+ return ok && t.info&IsUntyped != 0
+}
+
+func isOrdered(typ Type) bool {
+ t, ok := typ.Underlying().(*Basic)
+ return ok && t.info&IsOrdered != 0
+}
+
+func isConstType(typ Type) bool {
+ t, ok := typ.Underlying().(*Basic)
+ return ok && t.info&IsConstType != 0
+}
+
+func isInterface(typ Type) bool {
+ _, ok := typ.Underlying().(*Interface)
+ return ok
+}
+
+// Comparable reports whether values of type T are comparable.
+func Comparable(T Type) bool {
+ switch t := T.Underlying().(type) {
+ case *Basic:
+ // assume invalid types to be comparable
+ // to avoid follow-up errors
+ return t.kind != UntypedNil
+ case *Pointer, *Interface, *Chan:
+ return true
+ case *Struct:
+ for _, f := range t.fields {
+ if !Comparable(f.typ) {
+ return false
+ }
+ }
+ return true
+ case *Array:
+ return Comparable(t.elem)
+ }
+ return false
+}
+
+// hasNil reports whether a type includes the nil value.
+func hasNil(typ Type) bool {
+ switch t := typ.Underlying().(type) {
+ case *Basic:
+ return t.kind == UnsafePointer
+ case *Slice, *Pointer, *Signature, *Interface, *Map, *Chan:
+ return true
+ }
+ return false
+}
+
+// Identical reports whether x and y are identical.
+func Identical(x, y Type) bool {
+ return identical(x, y, nil)
+}
+
+// An ifacePair is a node in a stack of interface type pairs compared for identity.
+type ifacePair struct {
+ x, y *Interface
+ prev *ifacePair
+}
+
+func (p *ifacePair) identical(q *ifacePair) bool {
+ return p.x == q.x && p.y == q.y || p.x == q.y && p.y == q.x
+}
+
+func identical(x, y Type, p *ifacePair) bool {
+ if x == y {
+ return true
+ }
+
+ switch x := x.(type) {
+ case *Basic:
+ // Basic types are singletons except for the rune and byte
+ // aliases, thus we cannot solely rely on the x == y check
+ // above.
+ if y, ok := y.(*Basic); ok {
+ return x.kind == y.kind
+ }
+
+ case *Array:
+ // Two array types are identical if they have identical element types
+ // and the same array length.
+ if y, ok := y.(*Array); ok {
+ return x.len == y.len && identical(x.elem, y.elem, p)
+ }
+
+ case *Slice:
+ // Two slice types are identical if they have identical element types.
+ if y, ok := y.(*Slice); ok {
+ return identical(x.elem, y.elem, p)
+ }
+
+ case *Struct:
+ // Two struct types are identical if they have the same sequence of fields,
+ // and if corresponding fields have the same names, and identical types,
+ // and identical tags. Two anonymous fields are considered to have the same
+ // name. Lower-case field names from different packages are always different.
+ if y, ok := y.(*Struct); ok {
+ if x.NumFields() == y.NumFields() {
+ for i, f := range x.fields {
+ g := y.fields[i]
+ if f.anonymous != g.anonymous ||
+ x.Tag(i) != y.Tag(i) ||
+ !f.sameId(g.pkg, g.name) ||
+ !identical(f.typ, g.typ, p) {
+ return false
+ }
+ }
+ return true
+ }
+ }
+
+ case *Pointer:
+ // Two pointer types are identical if they have identical base types.
+ if y, ok := y.(*Pointer); ok {
+ return identical(x.base, y.base, p)
+ }
+
+ case *Tuple:
+ // Two tuple types are identical if they have the same number of elements
+ // and corresponding elements have identical types.
+ if y, ok := y.(*Tuple); ok {
+ if x.Len() == y.Len() {
+ if x != nil {
+ for i, v := range x.vars {
+ w := y.vars[i]
+ if !identical(v.typ, w.typ, p) {
+ return false
+ }
+ }
+ }
+ return true
+ }
+ }
+
+ case *Signature:
+ // Two function types are identical if they have the same number of parameters
+ // and result values, corresponding parameter and result types are identical,
+ // and either both functions are variadic or neither is. Parameter and result
+ // names are not required to match.
+ if y, ok := y.(*Signature); ok {
+ return x.variadic == y.variadic &&
+ identical(x.params, y.params, p) &&
+ identical(x.results, y.results, p)
+ }
+
+ case *Interface:
+ // Two interface types are identical if they have the same set of methods with
+ // the same names and identical function types. Lower-case method names from
+ // different packages are always different. The order of the methods is irrelevant.
+ if y, ok := y.(*Interface); ok {
+ a := x.allMethods
+ b := y.allMethods
+ if len(a) == len(b) {
+ // Interface types are the only types where cycles can occur
+ // that are not "terminated" via named types; and such cycles
+ // can only be created via method parameter types that are
+ // anonymous interfaces (directly or indirectly) embedding
+ // the current interface. Example:
+ //
+ // type T interface {
+ // m() interface{T}
+ // }
+ //
+ // If two such (differently named) interfaces are compared,
+ // endless recursion occurs if the cycle is not detected.
+ //
+ // If x and y were compared before, they must be equal
+ // (if they were not, the recursion would have stopped);
+ // search the ifacePair stack for the same pair.
+ //
+ // This is a quadratic algorithm, but in practice these stacks
+ // are extremely short (bounded by the nesting depth of interface
+ // type declarations that recur via parameter types, an extremely
+ // rare occurrence). An alternative implementation might use a
+ // "visited" map, but that is probably less efficient overall.
+ q := &ifacePair{x, y, p}
+ for p != nil {
+ if p.identical(q) {
+ return true // same pair was compared before
+ }
+ p = p.prev
+ }
+ if debug {
+ assert(sort.IsSorted(byUniqueMethodName(a)))
+ assert(sort.IsSorted(byUniqueMethodName(b)))
+ }
+ for i, f := range a {
+ g := b[i]
+ if f.Id() != g.Id() || !identical(f.typ, g.typ, q) {
+ return false
+ }
+ }
+ return true
+ }
+ }
+
+ case *Map:
+ // Two map types are identical if they have identical key and value types.
+ if y, ok := y.(*Map); ok {
+ return identical(x.key, y.key, p) && identical(x.elem, y.elem, p)
+ }
+
+ case *Chan:
+ // Two channel types are identical if they have identical value types
+ // and the same direction.
+ if y, ok := y.(*Chan); ok {
+ return x.dir == y.dir && identical(x.elem, y.elem, p)
+ }
+
+ case *Named:
+ // Two named types are identical if their type names originate
+ // in the same type declaration.
+ if y, ok := y.(*Named); ok {
+ return x.obj == y.obj
+ }
+
+ default:
+ unreachable()
+ }
+
+ return false
+}
+
+// defaultType returns the default "typed" type for an "untyped" type;
+// it returns the incoming type for all other types. The default type
+// for untyped nil is untyped nil.
+//
+func defaultType(typ Type) Type {
+ if t, ok := typ.(*Basic); ok {
+ switch t.kind {
+ case UntypedBool:
+ return Typ[Bool]
+ case UntypedInt:
+ return Typ[Int]
+ case UntypedRune:
+ return UniverseRune // use 'rune' name
+ case UntypedFloat:
+ return Typ[Float64]
+ case UntypedComplex:
+ return Typ[Complex128]
+ case UntypedString:
+ return Typ[String]
+ }
+ }
+ return typ
+}
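A brief sketch (standard library go/types, not part of the patch) of the exported predicates defined above: Identical treats byte as an alias of uint8, and Comparable follows the struct/array/slice rules spelled out in Comparable.

package main

import (
    "fmt"
    "go/types"
)

func main() {
    fmt.Println(types.Identical(types.Typ[types.Byte], types.Typ[types.Uint8])) // true: byte aliases uint8

    intSlice := types.NewSlice(types.Typ[types.Int])
    fmt.Println(types.Identical(intSlice, types.NewSlice(types.Typ[types.Int]))) // true: same element type
    fmt.Println(types.Comparable(intSlice))                                      // false: slices are not comparable
    fmt.Println(types.Comparable(types.NewArray(types.Typ[types.Int], 3)))       // true: arrays of comparable elements
}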
diff --git a/llgo/third_party/go.tools/go/types/resolver.go b/llgo/third_party/go.tools/go/types/resolver.go
new file mode 100644
index 0000000000000000000000000000000000000000..88eef9e14b0dde81a8e3575c339a3104f1adf37b
--- /dev/null
+++ b/llgo/third_party/go.tools/go/types/resolver.go
@@ -0,0 +1,446 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+import (
+ "errors"
+ "fmt"
+ "go/ast"
+ "go/token"
+ pathLib "path"
+ "strconv"
+ "strings"
+ "unicode"
+
+ "llvm.org/llgo/third_party/go.tools/go/exact"
+)
+
+// A declInfo describes a package-level const, type, var, or func declaration.
+type declInfo struct {
+ file *Scope // scope of file containing this declaration
+ lhs []*Var // lhs of n:1 variable declarations, or nil
+ typ ast.Expr // type, or nil
+ init ast.Expr // init expression, or nil
+ fdecl *ast.FuncDecl // func declaration, or nil
+
+ deps map[Object]bool // type and init dependencies; lazily allocated
+ mark int // for dependency analysis
+}
+
+// hasInitializer reports whether the declared object has an initialization
+// expression or function body.
+func (d *declInfo) hasInitializer() bool {
+ return d.init != nil || d.fdecl != nil && d.fdecl.Body != nil
+}
+
+// addDep adds obj as a dependency to d.
+func (d *declInfo) addDep(obj Object) {
+ m := d.deps
+ if m == nil {
+ m = make(map[Object]bool)
+ d.deps = m
+ }
+ m[obj] = true
+}
+
+// arityMatch checks that the lhs and rhs of a const or var decl
+// have the appropriate number of names and init exprs. For const
+// decls, init is the value spec providing the init exprs; for
+// var decls, init is nil (the init exprs are in s in this case).
+func (check *Checker) arityMatch(s, init *ast.ValueSpec) {
+ l := len(s.Names)
+ r := len(s.Values)
+ if init != nil {
+ r = len(init.Values)
+ }
+
+ switch {
+ case init == nil && r == 0:
+ // var decl w/o init expr
+ if s.Type == nil {
+ check.errorf(s.Pos(), "missing type or init expr")
+ }
+ case l < r:
+ if l < len(s.Values) {
+ // init exprs from s
+ n := s.Values[l]
+ check.errorf(n.Pos(), "extra init expr %s", n)
+ // TODO(gri) avoid declared but not used error here
+ } else {
+ // init exprs "inherited"
+ check.errorf(s.Pos(), "extra init expr at %s", init.Pos())
+ // TODO(gri) avoid declared but not used error here
+ }
+ case l > r && (init != nil || r != 1):
+ n := s.Names[r]
+ check.errorf(n.Pos(), "missing init expr for %s", n)
+ }
+}
+
+func validatedImportPath(path string) (string, error) {
+ s, err := strconv.Unquote(path)
+ if err != nil {
+ return "", err
+ }
+ if s == "" {
+ return "", fmt.Errorf("empty string")
+ }
+ const illegalChars = `!"#$%&'()*,:;<=>?[\]^{|}` + "`\uFFFD"
+ for _, r := range s {
+ if !unicode.IsGraphic(r) || unicode.IsSpace(r) || strings.ContainsRune(illegalChars, r) {
+ return s, fmt.Errorf("invalid character %#U", r)
+ }
+ }
+ return s, nil
+}
+
+// declarePkgObj declares obj in the package scope, records its ident -> obj mapping,
+// and updates check.objMap. The object must not be a function or method.
+func (check *Checker) declarePkgObj(ident *ast.Ident, obj Object, d *declInfo) {
+ assert(ident.Name == obj.Name())
+
+ // spec: "A package-scope or file-scope identifier with name init
+ // may only be declared to be a function with this (func()) signature."
+ if ident.Name == "init" {
+ check.errorf(ident.Pos(), "cannot declare init - must be func")
+ return
+ }
+
+ check.declare(check.pkg.scope, ident, obj)
+ check.objMap[obj] = d
+ obj.setOrder(uint32(len(check.objMap)))
+}
+
+// filename returns a filename suitable for debugging output.
+func (check *Checker) filename(fileNo int) string {
+ file := check.files[fileNo]
+ if pos := file.Pos(); pos.IsValid() {
+ return check.fset.File(pos).Name()
+ }
+ return fmt.Sprintf("file[%d]", fileNo)
+}
+
+// collectObjects collects all file and package objects and inserts them
+// into their respective scopes. It also performs imports and associates
+// methods with receiver base type names.
+func (check *Checker) collectObjects() {
+ pkg := check.pkg
+
+ importer := check.conf.Import
+ if importer == nil {
+ if DefaultImport != nil {
+ importer = DefaultImport
+ } else {
+ // Panic if we encounter an import.
+ importer = func(map[string]*Package, string) (*Package, error) {
+ panic(`no Config.Import or DefaultImport (missing import _ "llvm.org/llgo/third_party/go.tools/go/gcimporter"?)`)
+ }
+ }
+ }
+
+ // pkgImports is the set of packages already imported by any package file seen
+ // so far. Used to avoid duplicate entries in pkg.imports. Allocate and populate
+ // it (pkg.imports may not be empty if we are checking test files incrementally).
+ var pkgImports = make(map[*Package]bool)
+ for _, imp := range pkg.imports {
+ pkgImports[imp] = true
+ }
+
+ for fileNo, file := range check.files {
+ // The package identifier denotes the current package,
+ // but there is no corresponding package object.
+ check.recordDef(file.Name, nil)
+
+ fileScope := NewScope(check.pkg.scope, check.filename(fileNo))
+ check.recordScope(file, fileScope)
+
+ for _, decl := range file.Decls {
+ switch d := decl.(type) {
+ case *ast.BadDecl:
+ // ignore
+
+ case *ast.GenDecl:
+ var last *ast.ValueSpec // last ValueSpec with type or init exprs seen
+ for iota, spec := range d.Specs {
+ switch s := spec.(type) {
+ case *ast.ImportSpec:
+ // import package
+ var imp *Package
+ path, err := validatedImportPath(s.Path.Value)
+ if err != nil {
+ check.errorf(s.Path.Pos(), "invalid import path (%s)", err)
+ continue
+ }
+ if path == "C" && check.conf.FakeImportC {
+ // TODO(gri) shouldn't create a new one each time
+ imp = NewPackage("C", "C")
+ imp.fake = true
+ } else {
+ var err error
+ imp, err = importer(check.conf.Packages, path)
+ if imp == nil && err == nil {
+ err = errors.New("Config.Import returned nil but no error")
+ }
+ if err != nil {
+ check.errorf(s.Path.Pos(), "could not import %s (%s)", path, err)
+ continue
+ }
+ }
+
+ // add package to list of explicit imports
+ // (this functionality is provided as a convenience
+ // for clients; it is not needed for type-checking)
+ if !pkgImports[imp] {
+ pkgImports[imp] = true
+ if imp != Unsafe {
+ pkg.imports = append(pkg.imports, imp)
+ }
+ }
+
+ // local name overrides imported package name
+ name := imp.name
+ if s.Name != nil {
+ name = s.Name.Name
+ if name == "init" {
+ check.errorf(s.Name.Pos(), "cannot declare init - must be func")
+ continue
+ }
+ }
+
+ obj := NewPkgName(s.Pos(), pkg, name, imp)
+ if s.Name != nil {
+ // in a dot-import, the dot represents the package
+ check.recordDef(s.Name, obj)
+ } else {
+ check.recordImplicit(s, obj)
+ }
+
+ // add import to file scope
+ if name == "." {
+ // merge imported scope with file scope
+ for _, obj := range imp.scope.elems {
+ // A package scope may contain non-exported objects,
+ // do not import them!
+ if obj.Exported() {
+ // TODO(gri) When we import a package, we create
+ // a new local package object. We should do the
+ // same for each dot-imported object. That way
+ // they can have correct position information.
+ // (We must not modify their existing position
+ // information because the same package - found
+ // via Config.Packages - may be dot-imported in
+ // another package!)
+ check.declare(fileScope, nil, obj)
+ check.recordImplicit(s, obj)
+ }
+ }
+ // add position to set of dot-import positions for this file
+ // (this is only needed for "imported but not used" errors)
+ check.addUnusedDotImport(fileScope, imp, s.Pos())
+ } else {
+ // declare imported package object in file scope
+ check.declare(fileScope, nil, obj)
+ }
+
+ case *ast.ValueSpec:
+ switch d.Tok {
+ case token.CONST:
+ // determine which initialization expressions to use
+ switch {
+ case s.Type != nil || len(s.Values) > 0:
+ last = s
+ case last == nil:
+ last = new(ast.ValueSpec) // make sure last exists
+ }
+
+ // declare all constants
+ for i, name := range s.Names {
+ obj := NewConst(name.Pos(), pkg, name.Name, nil, exact.MakeInt64(int64(iota)))
+
+ var init ast.Expr
+ if i < len(last.Values) {
+ init = last.Values[i]
+ }
+
+ d := &declInfo{file: fileScope, typ: last.Type, init: init}
+ check.declarePkgObj(name, obj, d)
+ }
+
+ check.arityMatch(s, last)
+
+ case token.VAR:
+ lhs := make([]*Var, len(s.Names))
+ // If there's exactly one rhs initializer, use
+ // the same declInfo d1 for all lhs variables
+ // so that each lhs variable depends on the same
+ // rhs initializer (n:1 var declaration).
+ var d1 *declInfo
+ if len(s.Values) == 1 {
+ // The lhs elements are only set up after the for loop below,
+ // but that's ok because declareVar only collects the declInfo
+ // for a later phase.
+ d1 = &declInfo{file: fileScope, lhs: lhs, typ: s.Type, init: s.Values[0]}
+ }
+
+ // declare all variables
+ for i, name := range s.Names {
+ obj := NewVar(name.Pos(), pkg, name.Name, nil)
+ lhs[i] = obj
+
+ d := d1
+ if d == nil {
+ // individual assignments
+ var init ast.Expr
+ if i < len(s.Values) {
+ init = s.Values[i]
+ }
+ d = &declInfo{file: fileScope, typ: s.Type, init: init}
+ }
+
+ check.declarePkgObj(name, obj, d)
+ }
+
+ check.arityMatch(s, nil)
+
+ default:
+ check.invalidAST(s.Pos(), "invalid token %s", d.Tok)
+ }
+
+ case *ast.TypeSpec:
+ obj := NewTypeName(s.Name.Pos(), pkg, s.Name.Name, nil)
+ check.declarePkgObj(s.Name, obj, &declInfo{file: fileScope, typ: s.Type})
+
+ default:
+ check.invalidAST(s.Pos(), "unknown ast.Spec node %T", s)
+ }
+ }
+
+ case *ast.FuncDecl:
+ name := d.Name.Name
+ obj := NewFunc(d.Name.Pos(), pkg, name, nil)
+ if d.Recv == nil {
+ // regular function
+ if name == "init" {
+ // don't declare init functions in the package scope - they are invisible
+ obj.parent = pkg.scope
+ check.recordDef(d.Name, obj)
+ // init functions must have a body
+ if d.Body == nil {
+ check.softErrorf(obj.pos, "missing function body")
+ }
+ } else {
+ check.declare(pkg.scope, d.Name, obj)
+ }
+ } else {
+ // method
+ check.recordDef(d.Name, obj)
+ // Associate method with receiver base type name, if possible.
+ // Ignore methods that have an invalid receiver, or a blank _
+ // receiver name. They will be type-checked later, with regular
+ // functions.
+ if list := d.Recv.List; len(list) > 0 {
+ typ := list[0].Type
+ if ptr, _ := typ.(*ast.StarExpr); ptr != nil {
+ typ = ptr.X
+ }
+ if base, _ := typ.(*ast.Ident); base != nil && base.Name != "_" {
+ check.assocMethod(base.Name, obj)
+ }
+ }
+ }
+ info := &declInfo{file: fileScope, fdecl: d}
+ check.objMap[obj] = info
+ obj.setOrder(uint32(len(check.objMap)))
+
+ default:
+ check.invalidAST(d.Pos(), "unknown ast.Decl node %T", d)
+ }
+ }
+ }
+
+ // verify that objects in package and file scopes have different names
+ for _, scope := range check.pkg.scope.children /* file scopes */ {
+ for _, obj := range scope.elems {
+ if alt := pkg.scope.Lookup(obj.Name()); alt != nil {
+ if pkg, ok := obj.(*PkgName); ok {
+ check.errorf(alt.Pos(), "%s already declared through import of %s", alt.Name(), pkg.Imported())
+ check.reportAltDecl(pkg)
+ } else {
+ check.errorf(alt.Pos(), "%s already declared through dot-import of %s", alt.Name(), obj.Pkg())
+ // TODO(gri) dot-imported objects don't have a position; reportAltDecl won't print anything
+ check.reportAltDecl(obj)
+ }
+ }
+ }
+ }
+}
+
+// packageObjects typechecks all package objects in objList, but not function bodies.
+func (check *Checker) packageObjects(objList []Object) {
+ // add new methods to already type-checked types (from a prior Checker.Files call)
+ for _, obj := range objList {
+ if obj, _ := obj.(*TypeName); obj != nil && obj.typ != nil {
+ check.addMethodDecls(obj)
+ }
+ }
+
+ // pre-allocate space for type declaration paths so that the underlying array is reused
+ typePath := make([]*TypeName, 0, 8)
+
+ for _, obj := range objList {
+ check.objDecl(obj, nil, typePath)
+ }
+
+ // At this point we may have a non-empty check.methods map; this means that not all
+ // entries were deleted at the end of typeDecl because the respective receiver base
+ // types were not found. In that case, an error was reported when declaring those
+ // methods. We can now safely discard this map.
+ check.methods = nil
+}
+
+// functionBodies typechecks all function bodies.
+func (check *Checker) functionBodies() {
+ for _, f := range check.funcs {
+ check.funcBody(f.decl, f.name, f.sig, f.body)
+ }
+}
+
+// unusedImports checks for unused imports.
+func (check *Checker) unusedImports() {
+ // if function bodies are not checked, packages' uses are likely missing - don't check
+ if check.conf.IgnoreFuncBodies {
+ return
+ }
+
+ // spec: "It is illegal (...) to directly import a package without referring to
+ // any of its exported identifiers. To import a package solely for its side-effects
+ // (initialization), use the blank identifier as explicit package name."
+
+ // check use of regular imported packages
+ for _, scope := range check.pkg.scope.children /* file scopes */ {
+ for _, obj := range scope.elems {
+ if obj, ok := obj.(*PkgName); ok {
+ // Unused "blank imports" are automatically ignored
+ // since _ identifiers are not entered into scopes.
+ if !obj.used {
+ path := obj.imported.path
+ base := pathLib.Base(path)
+ if obj.name == base {
+ check.softErrorf(obj.pos, "%q imported but not used", path)
+ } else {
+ check.softErrorf(obj.pos, "%q imported but not used as %s", path, obj.name)
+ }
+ }
+ }
+ }
+ }
+
+ // check use of dot-imported packages
+ for _, unusedDotImports := range check.unusedDotImports {
+ for pkg, pos := range unusedDotImports {
+ check.softErrorf(pos, "%q imported but not used", pkg.path)
+ }
+ }
+}
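For reference, a small sketch of the behavior unusedImports enforces, written as ordinary Go source rather than against this package's API; it is not part of the patch and the imported package names are arbitrary:

package p

import "fmt"        // reported: "fmt" imported but not used
import m "math"     // reported: "math" imported but not used as m
import _ "net/http" // blank import: never reported, since _ is not entered into the file scope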
diff --git a/llgo/third_party/go.tools/go/types/resolver_test.go b/llgo/third_party/go.tools/go/types/resolver_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..d4f621e5c9c175698a820016e57bb56fe61c4114
--- /dev/null
+++ b/llgo/third_party/go.tools/go/types/resolver_test.go
@@ -0,0 +1,187 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types_test
+
+import (
+ "fmt"
+ "go/ast"
+ "go/parser"
+ "go/token"
+ "sort"
+ "testing"
+
+ _ "llvm.org/llgo/third_party/go.tools/go/gcimporter"
+ . "llvm.org/llgo/third_party/go.tools/go/types"
+)
+
+var sources = []string{
+ `
+ package p
+ import "fmt"
+ import "math"
+ const pi = math.Pi
+ func sin(x float64) float64 {
+ return math.Sin(x)
+ }
+ var Println = fmt.Println
+ `,
+ `
+ package p
+ import "fmt"
+ type errorStringer struct { fmt.Stringer; error }
+ func f() string {
+ _ = "foo"
+ return fmt.Sprintf("%d", g())
+ }
+ func g() (x int) { return }
+ `,
+ `
+ package p
+ import . "go/parser"
+ import "sync"
+ func h() Mode { return ImportsOnly }
+ var _, x int = 1, 2
+ func init() {}
+ type T struct{ *sync.Mutex; a, b, c int}
+ type I interface{ m() }
+ var _ = T{a: 1, b: 2, c: 3}
+ func (_ T) m() {}
+ func (T) _() {}
+ var i I
+ var _ = i.m
+ func _(s []int) { for i, x := range s { _, _ = i, x } }
+ func _(x interface{}) {
+ switch x := x.(type) {
+ case int:
+ _ = x
+ }
+ switch {} // implicit 'true' tag
+ }
+ `,
+ `
+ package p
+ type S struct{}
+ func (T) _() {}
+ func (T) _() {}
+ `,
+ `
+ package p
+ func _() {
+ L0:
+ L1:
+ goto L0
+ for {
+ goto L1
+ }
+ if true {
+ goto L2
+ }
+ L2:
+ }
+ `,
+}
+
+var pkgnames = []string{
+ "fmt",
+ "math",
+}
+
+func TestResolveIdents(t *testing.T) {
+ // parse package files
+ fset := token.NewFileSet()
+ var files []*ast.File
+ for i, src := range sources {
+ f, err := parser.ParseFile(fset, fmt.Sprintf("sources[%d]", i), src, parser.DeclarationErrors)
+ if err != nil {
+ t.Fatal(err)
+ }
+ files = append(files, f)
+ }
+
+ // resolve and type-check package AST
+ var conf Config
+ uses := make(map[*ast.Ident]Object)
+ defs := make(map[*ast.Ident]Object)
+ _, err := conf.Check("testResolveIdents", fset, files, &Info{Defs: defs, Uses: uses})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // check that all packages were imported
+ for _, name := range pkgnames {
+ if conf.Packages[name] == nil {
+ t.Errorf("package %s not imported", name)
+ }
+ }
+
+ // check that qualified identifiers are resolved
+ for _, f := range files {
+ ast.Inspect(f, func(n ast.Node) bool {
+ if s, ok := n.(*ast.SelectorExpr); ok {
+ if x, ok := s.X.(*ast.Ident); ok {
+ obj := uses[x]
+ if obj == nil {
+ t.Errorf("%s: unresolved qualified identifier %s", fset.Position(x.Pos()), x.Name)
+ return false
+ }
+ if _, ok := obj.(*PkgName); ok && uses[s.Sel] == nil {
+ t.Errorf("%s: unresolved selector %s", fset.Position(s.Sel.Pos()), s.Sel.Name)
+ return false
+ }
+ return false
+ }
+ return false
+ }
+ return true
+ })
+ }
+
+ for id, obj := range uses {
+ if obj == nil {
+ t.Errorf("%s: Uses[%s] == nil", fset.Position(id.Pos()), id.Name)
+ }
+ }
+
+ // check that each identifier in the source is found in uses or defs or both
+ var both []string
+ for _, f := range files {
+ ast.Inspect(f, func(n ast.Node) bool {
+ if x, ok := n.(*ast.Ident); ok {
+ var objects int
+ if _, found := uses[x]; found {
+ objects |= 1
+ delete(uses, x)
+ }
+ if _, found := defs[x]; found {
+ objects |= 2
+ delete(defs, x)
+ }
+ if objects == 0 {
+ t.Errorf("%s: unresolved identifier %s", fset.Position(x.Pos()), x.Name)
+ } else if objects == 3 {
+ both = append(both, x.Name)
+ }
+ return false
+ }
+ return true
+ })
+ }
+
+ // check the expected set of idents that are simultaneously uses and defs
+ sort.Strings(both)
+ if got, want := fmt.Sprint(both), "[Mutex Stringer error]"; got != want {
+ t.Errorf("simultaneous uses/defs = %s, want %s", got, want)
+ }
+
+ // any left-over identifiers didn't exist in the source
+ for x := range uses {
+ t.Errorf("%s: identifier %s not present in source", fset.Position(x.Pos()), x.Name)
+ }
+ for x := range defs {
+ t.Errorf("%s: identifier %s not present in source", fset.Position(x.Pos()), x.Name)
+ }
+
+ // TODO(gri) add tests to check ImplicitObj callbacks
+}
diff --git a/llgo/third_party/go.tools/go/types/return.go b/llgo/third_party/go.tools/go/types/return.go
new file mode 100644
index 0000000000000000000000000000000000000000..df5a482ad4478175513879e98ff28a5cf7ff887c
--- /dev/null
+++ b/llgo/third_party/go.tools/go/types/return.go
@@ -0,0 +1,185 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements isTerminating.
+
+package types
+
+import (
+ "go/ast"
+ "go/token"
+)
+
+// isTerminating reports whether s is a terminating statement.
+// If s is labeled, label is the label name; otherwise label
+// is "".
+func (check *Checker) isTerminating(s ast.Stmt, label string) bool {
+ switch s := s.(type) {
+ default:
+ unreachable()
+
+ case *ast.BadStmt, *ast.DeclStmt, *ast.EmptyStmt, *ast.SendStmt,
+ *ast.IncDecStmt, *ast.AssignStmt, *ast.GoStmt, *ast.DeferStmt,
+ *ast.RangeStmt:
+ // no chance
+
+ case *ast.LabeledStmt:
+ return check.isTerminating(s.Stmt, s.Label.Name)
+
+ case *ast.ExprStmt:
+ // the predeclared (possibly parenthesized) panic() function is terminating
+ if call, _ := unparen(s.X).(*ast.CallExpr); call != nil {
+ if id, _ := call.Fun.(*ast.Ident); id != nil {
+ if _, obj := check.scope.LookupParent(id.Name); obj != nil {
+ if b, _ := obj.(*Builtin); b != nil && b.id == _Panic {
+ return true
+ }
+ }
+ }
+ }
+
+ case *ast.ReturnStmt:
+ return true
+
+ case *ast.BranchStmt:
+ if s.Tok == token.GOTO || s.Tok == token.FALLTHROUGH {
+ return true
+ }
+
+ case *ast.BlockStmt:
+ return check.isTerminatingList(s.List, "")
+
+ case *ast.IfStmt:
+ if s.Else != nil &&
+ check.isTerminating(s.Body, "") &&
+ check.isTerminating(s.Else, "") {
+ return true
+ }
+
+ case *ast.SwitchStmt:
+ return check.isTerminatingSwitch(s.Body, label)
+
+ case *ast.TypeSwitchStmt:
+ return check.isTerminatingSwitch(s.Body, label)
+
+ case *ast.SelectStmt:
+ for _, s := range s.Body.List {
+ cc := s.(*ast.CommClause)
+ if !check.isTerminatingList(cc.Body, "") || hasBreakList(cc.Body, label, true) {
+ return false
+ }
+
+ }
+ return true
+
+ case *ast.ForStmt:
+ if s.Cond == nil && !hasBreak(s.Body, label, true) {
+ return true
+ }
+ }
+
+ return false
+}
+
+func (check *Checker) isTerminatingList(list []ast.Stmt, label string) bool {
+ n := len(list)
+ return n > 0 && check.isTerminating(list[n-1], label)
+}
+
+func (check *Checker) isTerminatingSwitch(body *ast.BlockStmt, label string) bool {
+ hasDefault := false
+ for _, s := range body.List {
+ cc := s.(*ast.CaseClause)
+ if cc.List == nil {
+ hasDefault = true
+ }
+ if !check.isTerminatingList(cc.Body, "") || hasBreakList(cc.Body, label, true) {
+ return false
+ }
+ }
+ return hasDefault
+}
+
+// TODO(gri) For nested breakable statements, the current implementation of hasBreak
+// will traverse the same subtree repeatedly, once for each label. Replace
+// with a single-pass label/break matching phase.
+
+// hasBreak reports whether s is or contains a break statement
+// referring to the statement labeled label or, if implicit is set,
+// to the closest enclosing breakable statement.
+func hasBreak(s ast.Stmt, label string, implicit bool) bool {
+ switch s := s.(type) {
+ default:
+ unreachable()
+
+ case *ast.BadStmt, *ast.DeclStmt, *ast.EmptyStmt, *ast.ExprStmt,
+ *ast.SendStmt, *ast.IncDecStmt, *ast.AssignStmt, *ast.GoStmt,
+ *ast.DeferStmt, *ast.ReturnStmt:
+ // no chance
+
+ case *ast.LabeledStmt:
+ return hasBreak(s.Stmt, label, implicit)
+
+ case *ast.BranchStmt:
+ if s.Tok == token.BREAK {
+ if s.Label == nil {
+ return implicit
+ }
+ if s.Label.Name == label {
+ return true
+ }
+ }
+
+ case *ast.BlockStmt:
+ return hasBreakList(s.List, label, implicit)
+
+ case *ast.IfStmt:
+ if hasBreak(s.Body, label, implicit) ||
+ s.Else != nil && hasBreak(s.Else, label, implicit) {
+ return true
+ }
+
+ case *ast.CaseClause:
+ return hasBreakList(s.Body, label, implicit)
+
+ case *ast.SwitchStmt:
+ if label != "" && hasBreak(s.Body, label, false) {
+ return true
+ }
+
+ case *ast.TypeSwitchStmt:
+ if label != "" && hasBreak(s.Body, label, false) {
+ return true
+ }
+
+ case *ast.CommClause:
+ return hasBreakList(s.Body, label, implicit)
+
+ case *ast.SelectStmt:
+ if label != "" && hasBreak(s.Body, label, false) {
+ return true
+ }
+
+ case *ast.ForStmt:
+ if label != "" && hasBreak(s.Body, label, false) {
+ return true
+ }
+
+ case *ast.RangeStmt:
+ if label != "" && hasBreak(s.Body, label, false) {
+ return true
+ }
+ }
+
+ return false
+}
+
+func hasBreakList(list []ast.Stmt, label string, implicit bool) bool {
+ for _, s := range list {
+ if hasBreak(s, label, implicit) {
+ return true
+ }
+ }
+ return false
+}
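An illustration of the rule isTerminating implements, again as plain Go source that is not part of the patch: a body whose final statement is a for loop with no condition and no break targeting it is terminating, so no "missing return" is reported.

package p

func f(x int) int {
	for { // terminating: no condition, no break referring to this loop
		if x > 0 {
			return x
		}
		x++
	}
	// no "missing return" error for f
}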
diff --git a/llgo/third_party/go.tools/go/types/scope.go b/llgo/third_party/go.tools/go/types/scope.go
new file mode 100644
index 0000000000000000000000000000000000000000..8ab0f64feb84011b8c7732cf737ca66841781c61
--- /dev/null
+++ b/llgo/third_party/go.tools/go/types/scope.go
@@ -0,0 +1,145 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements Scopes.
+
+package types
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "sort"
+ "strings"
+)
+
+// TODO(gri) Provide scopes with a name or other mechanism so that
+// objects can use that information for better printing.
+
+// A Scope maintains a set of objects and links to its containing
+// (parent) and contained (children) scopes. Objects may be inserted
+// and looked up by name. The zero value for Scope is a ready-to-use
+// empty scope.
+type Scope struct {
+ parent *Scope
+ children []*Scope
+ comment string // for debugging only
+ elems map[string]Object // lazily allocated
+}
+
+// NewScope returns a new, empty scope contained in the given parent
+// scope, if any. The comment is for debugging only.
+func NewScope(parent *Scope, comment string) *Scope {
+ s := &Scope{parent: parent, comment: comment}
+ // don't add children to Universe scope!
+ if parent != nil && parent != Universe {
+ parent.children = append(parent.children, s)
+ }
+ return s
+}
+
+// Parent returns the scope's containing (parent) scope.
+func (s *Scope) Parent() *Scope { return s.parent }
+
+// Len() returns the number of scope elements.
+func (s *Scope) Len() int { return len(s.elems) }
+
+// Names returns the scope's element names in sorted order.
+func (s *Scope) Names() []string {
+ names := make([]string, len(s.elems))
+ i := 0
+ for name := range s.elems {
+ names[i] = name
+ i++
+ }
+ sort.Strings(names)
+ return names
+}
+
+// NumChildren() returns the number of scopes nested in s.
+func (s *Scope) NumChildren() int { return len(s.children) }
+
+// Child returns the i'th child scope for 0 <= i < NumChildren().
+func (s *Scope) Child(i int) *Scope { return s.children[i] }
+
+// Lookup returns the object in scope s with the given name if such an
+// object exists; otherwise the result is nil.
+func (s *Scope) Lookup(name string) Object {
+ return s.elems[name]
+}
+
+// LookupParent follows the parent chain of scopes starting with s until
+// it finds a scope where Lookup(name) returns a non-nil object, and then
+// returns that scope and object. If no such scope exists, the result is (nil, nil).
+//
+// Note that obj.Parent() may be different from the returned scope if the
+// object was inserted into the scope and already had a parent at that
+// time (see Insert, below). This can only happen for dot-imported objects
+// whose scope is the scope of the package that exported them.
+func (s *Scope) LookupParent(name string) (*Scope, Object) {
+ for ; s != nil; s = s.parent {
+ if obj := s.elems[name]; obj != nil {
+ return s, obj
+ }
+ }
+ return nil, nil
+}
+
+// Insert attempts to insert an object obj into scope s.
+// If s already contains an alternative object alt with
+// the same name, Insert leaves s unchanged and returns alt.
+// Otherwise it inserts obj, sets the object's parent scope
+// if not already set, and returns nil.
+func (s *Scope) Insert(obj Object) Object {
+ name := obj.Name()
+ if alt := s.elems[name]; alt != nil {
+ return alt
+ }
+ if s.elems == nil {
+ s.elems = make(map[string]Object)
+ }
+ s.elems[name] = obj
+ if obj.Parent() == nil {
+ obj.setParent(s)
+ }
+ return nil
+}
+
+// WriteTo writes a string representation of the scope to w,
+// with the scope elements sorted by name.
+// The level of indentation is controlled by n >= 0, with
+// n == 0 for no indentation.
+// If recurse is set, it also writes nested (children) scopes.
+func (s *Scope) WriteTo(w io.Writer, n int, recurse bool) {
+ const ind = ". "
+ indn := strings.Repeat(ind, n)
+
+ fmt.Fprintf(w, "%s%s scope %p {", indn, s.comment, s)
+ if len(s.elems) == 0 {
+ fmt.Fprintf(w, "}\n")
+ return
+ }
+
+ fmt.Fprintln(w)
+ indn1 := indn + ind
+ for _, name := range s.Names() {
+ fmt.Fprintf(w, "%s%s\n", indn1, s.elems[name])
+ }
+
+ if recurse {
+ for _, s := range s.children {
+ fmt.Fprintln(w)
+ s.WriteTo(w, n+1, recurse)
+ }
+ }
+
+ fmt.Fprintf(w, "%s}", indn)
+}
+
+// String returns a string representation of the scope, for debugging.
+func (s *Scope) String() string {
+ var buf bytes.Buffer
+ s.WriteTo(&buf, 0, false)
+ return buf.String()
+}
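A minimal usage sketch, not part of the patch, written against the standard library's go/types, which exports an equivalent Scope API; note that the standard library's LookupParent takes an extra token.Pos argument, and the package path and object names below are purely illustrative.

package main

import (
	"fmt"
	"go/constant"
	"go/token"
	"go/types"
)

func main() {
	pkg := types.NewPackage("example.com/p", "p")
	scope := pkg.Scope() // the package scope; its parent is Universe

	answer := types.NewConst(token.NoPos, pkg, "answer",
		types.Typ[types.UntypedInt], constant.MakeInt64(42))

	if alt := scope.Insert(answer); alt != nil {
		fmt.Println("already declared:", alt) // Insert reports the existing object
	}

	fmt.Println(scope.Lookup("answer")) // found in the package scope
	fmt.Println(scope.Lookup("true"))   // nil: Lookup does not consult parent scopes
	_, obj := scope.LookupParent("true", token.NoPos)
	fmt.Println(obj) // the predeclared constant true, found in Universe
}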
diff --git a/llgo/third_party/go.tools/go/types/selection.go b/llgo/third_party/go.tools/go/types/selection.go
new file mode 100644
index 0000000000000000000000000000000000000000..1c7016550a6de704705efa5e4e5dff74de5bcbc5
--- /dev/null
+++ b/llgo/third_party/go.tools/go/types/selection.go
@@ -0,0 +1,143 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements Selections.
+
+package types
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// SelectionKind describes the kind of a selector expression x.f
+// (excluding qualified identifiers).
+type SelectionKind int
+
+const (
+ FieldVal SelectionKind = iota // x.f is a struct field selector
+ MethodVal // x.f is a method selector
+ MethodExpr // x.f is a method expression
+)
+
+// A Selection describes a selector expression x.f.
+// For the declarations:
+//
+// type T struct{ x int; E }
+// type E struct{}
+// func (e E) m() {}
+// var p *T
+//
+// the following relations exist:
+//
+// Selector    Kind          Recv    Obj    Type               Index     Indirect
+//
+// p.x         FieldVal      T       x      int                {0}       true
+// p.m         MethodVal     *T      m      func (e *T) m()    {1, 0}    true
+// T.m         MethodExpr    T       m      func m(_ T)        {1, 0}    false
+//
+type Selection struct {
+ kind SelectionKind
+ recv Type // type of x
+ obj Object // object denoted by x.f
+ index []int // path from x to x.f
+ indirect bool // set if there was any pointer indirection on the path
+}
+
+// Kind returns the selection kind.
+func (s *Selection) Kind() SelectionKind { return s.kind }
+
+// Recv returns the type of x in x.f.
+func (s *Selection) Recv() Type { return s.recv }
+
+// Obj returns the object denoted by x.f; a *Var for
+// a field selection, and a *Func in all other cases.
+func (s *Selection) Obj() Object { return s.obj }
+
+// Type returns the type of x.f, which may be different from the type of f.
+// See Selection for more information.
+func (s *Selection) Type() Type {
+ switch s.kind {
+ case MethodVal:
+ // The type of x.f is a method with its receiver type set
+ // to the type of x.
+ sig := *s.obj.(*Func).typ.(*Signature)
+ recv := *sig.recv
+ recv.typ = s.recv
+ sig.recv = &recv
+ return &sig
+
+ case MethodExpr:
+ // The type of x.f is a function (without receiver)
+ // and an additional first argument with the same type as x.
+ // TODO(gri) Similar code is already in call.go - factor!
+ // TODO(gri) Compute this eagerly to avoid allocations.
+ sig := *s.obj.(*Func).typ.(*Signature)
+ arg0 := *sig.recv
+ sig.recv = nil
+ arg0.typ = s.recv
+ var params []*Var
+ if sig.params != nil {
+ params = sig.params.vars
+ }
+ sig.params = NewTuple(append([]*Var{&arg0}, params...)...)
+ return &sig
+ }
+
+ // In all other cases, the type of x.f is the type of x.
+ return s.obj.Type()
+}
+
+// Index describes the path from x to f in x.f.
+// The last index entry is the field or method index of the type declaring f;
+// either:
+//
+// 1) the list of declared methods of a named type; or
+// 2) the list of methods of an interface type; or
+// 3) the list of fields of a struct type.
+//
+// The earlier index entries are the indices of the embedded fields implicitly
+// traversed to get from (the type of) x to f, starting at embedding depth 0.
+func (s *Selection) Index() []int { return s.index }
+
+// Indirect reports whether any pointer indirection was required to get from
+// x to f in x.f.
+func (s *Selection) Indirect() bool { return s.indirect }
+
+func (s *Selection) String() string { return SelectionString(nil, s) }
+
+// SelectionString returns the string form of s.
+// Type names are printed package-qualified
+// only if they do not belong to this package.
+//
+// Examples:
+// "field (T) f int"
+// "method (T) f(X) Y"
+// "method expr (T) f(X) Y"
+//
+func SelectionString(this *Package, s *Selection) string {
+ var k string
+ switch s.kind {
+ case FieldVal:
+ k = "field "
+ case MethodVal:
+ k = "method "
+ case MethodExpr:
+ k = "method expr "
+ default:
+ unreachable()
+ }
+ var buf bytes.Buffer
+ buf.WriteString(k)
+ buf.WriteByte('(')
+ WriteType(&buf, this, s.Recv())
+ fmt.Fprintf(&buf, ") %s", s.obj.Name())
+ if T := s.Type(); s.kind == FieldVal {
+ buf.WriteByte(' ')
+ WriteType(&buf, this, T)
+ } else {
+ WriteSignature(&buf, this, T.(*Signature))
+ }
+ return buf.String()
+}
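A sketch, not part of the patch, of how clients observe these Selections: the standard library's go/types, assumed here to mirror this vendored API, records a Selection for every x.f expression in Info.Selections during Check. The tiny source string mirrors the T/E example in the comment above.

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"
)

const src = `package p
type E struct{}
func (e E) m() {}
type T struct{ x int; E }
var p *T
var _ = p.x
var _ = p.m
`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	info := &types.Info{Selections: make(map[*ast.SelectorExpr]*types.Selection)}
	var conf types.Config // no Importer needed: src imports nothing
	if _, err := conf.Check("p", fset, []*ast.File{f}, info); err != nil {
		panic(err)
	}
	for expr, sel := range info.Selections {
		// e.g. x: kind=0 (FieldVal)  index=[0]   indirect=true
		//      m: kind=1 (MethodVal) index=[1 0] indirect=true
		fmt.Printf("%s: kind=%d index=%v indirect=%v type=%v\n",
			expr.Sel.Name, sel.Kind(), sel.Index(), sel.Indirect(), sel.Type())
	}
}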
diff --git a/llgo/third_party/go.tools/go/types/self_test.go b/llgo/third_party/go.tools/go/types/self_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..54d7636ad688a13c3bf38f360eb26762ff7e07c3
--- /dev/null
+++ b/llgo/third_party/go.tools/go/types/self_test.go
@@ -0,0 +1,101 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types_test
+
+import (
+ "flag"
+ "fmt"
+ "go/ast"
+ "go/parser"
+ "go/token"
+ "path/filepath"
+ "testing"
+ "time"
+
+ _ "llvm.org/llgo/third_party/go.tools/go/gcimporter"
+ . "llvm.org/llgo/third_party/go.tools/go/types"
+)
+
+var benchmark = flag.Bool("b", false, "run benchmarks")
+
+func TestSelf(t *testing.T) {
+ fset := token.NewFileSet()
+ files, err := pkgFiles(fset, ".")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = Check("go/types", fset, files)
+ if err != nil {
+ // Importing go.tools/go/exact doesn't work in the
+ // build dashboard environment. Don't report an error
+ // for now so that the build remains green.
+ // TODO(gri) fix this
+ t.Log(err) // replace w/ t.Fatal eventually
+ return
+ }
+}
+
+func TestBenchmark(t *testing.T) {
+ if !*benchmark {
+ return
+ }
+
+ // We're not using testing's benchmarking mechanism directly
+ // because we want custom output.
+
+ for _, p := range []string{"types", "exact", "gcimporter"} {
+ path := filepath.Join("..", p)
+ runbench(t, path, false)
+ runbench(t, path, true)
+ fmt.Println()
+ }
+}
+
+func runbench(t *testing.T, path string, ignoreFuncBodies bool) {
+ fset := token.NewFileSet()
+ files, err := pkgFiles(fset, path)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ b := testing.Benchmark(func(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ conf := Config{IgnoreFuncBodies: ignoreFuncBodies}
+ conf.Check(path, fset, files, nil)
+ }
+ })
+
+ // determine line count
+ lines := 0
+ fset.Iterate(func(f *token.File) bool {
+ lines += f.LineCount()
+ return true
+ })
+
+ d := time.Duration(b.NsPerOp())
+ fmt.Printf(
+ "%s: %s for %d lines (%d lines/s), ignoreFuncBodies = %v\n",
+ filepath.Base(path), d, lines, int64(float64(lines)/d.Seconds()), ignoreFuncBodies,
+ )
+}
+
+func pkgFiles(fset *token.FileSet, path string) ([]*ast.File, error) {
+ filenames, err := pkgFilenames(path) // from stdlib_test.go
+ if err != nil {
+ return nil, err
+ }
+
+ var files []*ast.File
+ for _, filename := range filenames {
+ file, err := parser.ParseFile(fset, filename, nil, 0)
+ if err != nil {
+ return nil, err
+ }
+ files = append(files, file)
+ }
+
+ return files, nil
+}
diff --git a/llgo/third_party/go.tools/go/types/sizes.go b/llgo/third_party/go.tools/go/types/sizes.go
new file mode 100644
index 0000000000000000000000000000000000000000..56fb310c29476eb6234bd0aee2624074e4c5d54e
--- /dev/null
+++ b/llgo/third_party/go.tools/go/types/sizes.go
@@ -0,0 +1,211 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements Sizes.
+
+package types
+
+// Sizes defines the sizing functions for package unsafe.
+type Sizes interface {
+ // Alignof returns the alignment of a variable of type T.
+ // Alignof must implement the alignment guarantees required by the spec.
+ Alignof(T Type) int64
+
+ // Offsetsof returns the offsets of the given struct fields, in bytes.
+ // Offsetsof must implement the offset guarantees required by the spec.
+ Offsetsof(fields []*Var) []int64
+
+ // Sizeof returns the size of a variable of type T.
+ // Sizeof must implement the size guarantees required by the spec.
+ Sizeof(T Type) int64
+}
+
+// StdSizes is a convenience type for creating commonly used Sizes.
+// It makes the following simplifying assumptions:
+//
+// - The size of explicitly sized basic types (int16, etc.) is the
+// specified size.
+// - The size of strings and interfaces is 2*WordSize.
+// - The size of slices is 3*WordSize.
+// - The size of an array of n elements corresponds to the size of
+// a struct of n consecutive fields of the array's element type.
+// - The size of a struct is the offset of the last field plus that
+// field's size. As with all element types, if the struct is used
+// in an array its size must first be aligned to a multiple of the
+// struct's alignment.
+// - All other types have size WordSize.
+// - Arrays and structs are aligned per spec definition; all other
+// types are naturally aligned with a maximum alignment MaxAlign.
+//
+// *StdSizes implements Sizes.
+//
+type StdSizes struct {
+ WordSize int64 // word size in bytes - must be >= 4 (32bits)
+ MaxAlign int64 // maximum alignment in bytes - must be >= 1
+}
+
+func (s *StdSizes) Alignof(T Type) int64 {
+ // For arrays and structs, alignment is defined in terms
+ // of alignment of the elements and fields, respectively.
+ switch t := T.Underlying().(type) {
+ case *Array:
+ // spec: "For a variable x of array type: unsafe.Alignof(x)
+ // is the same as unsafe.Alignof(x[0]), but at least 1."
+ return s.Alignof(t.elem)
+ case *Struct:
+ // spec: "For a variable x of struct type: unsafe.Alignof(x)
+ // is the largest of the values unsafe.Alignof(x.f) for each
+ // field f of x, but at least 1."
+ max := int64(1)
+ for _, f := range t.fields {
+ if a := s.Alignof(f.typ); a > max {
+ max = a
+ }
+ }
+ return max
+ }
+ a := s.Sizeof(T) // may be 0
+ // spec: "For a variable x of any type: unsafe.Alignof(x) is at least 1."
+ if a < 1 {
+ return 1
+ }
+ if a > s.MaxAlign {
+ return s.MaxAlign
+ }
+ return a
+}
+
+func (s *StdSizes) Offsetsof(fields []*Var) []int64 {
+ offsets := make([]int64, len(fields))
+ var o int64
+ for i, f := range fields {
+ a := s.Alignof(f.typ)
+ o = align(o, a)
+ offsets[i] = o
+ o += s.Sizeof(f.typ)
+ }
+ return offsets
+}
+
+var basicSizes = [...]byte{
+ Bool: 1,
+ Int8: 1,
+ Int16: 2,
+ Int32: 4,
+ Int64: 8,
+ Uint8: 1,
+ Uint16: 2,
+ Uint32: 4,
+ Uint64: 8,
+ Float32: 4,
+ Float64: 8,
+ Complex64: 8,
+ Complex128: 16,
+}
+
+func (s *StdSizes) Sizeof(T Type) int64 {
+ switch t := T.Underlying().(type) {
+ case *Basic:
+ assert(isTyped(T))
+ k := t.kind
+ if int(k) < len(basicSizes) {
+ if s := basicSizes[k]; s > 0 {
+ return int64(s)
+ }
+ }
+ if k == String {
+ return s.WordSize * 2
+ }
+ case *Array:
+ n := t.len
+ if n == 0 {
+ return 0
+ }
+ a := s.Alignof(t.elem)
+ z := s.Sizeof(t.elem)
+ return align(z, a)*(n-1) + z
+ case *Slice:
+ return s.WordSize * 3
+ case *Struct:
+ n := t.NumFields()
+ if n == 0 {
+ return 0
+ }
+ offsets := t.offsets
+ if t.offsets == nil {
+ // compute offsets on demand
+ offsets = s.Offsetsof(t.fields)
+ t.offsets = offsets
+ }
+ return offsets[n-1] + s.Sizeof(t.fields[n-1].typ)
+ case *Interface:
+ return s.WordSize * 2
+ }
+ return s.WordSize // catch-all
+}
+
+// stdSizes is used if Config.Sizes == nil.
+var stdSizes = StdSizes{8, 8}
+
+func (conf *Config) alignof(T Type) int64 {
+ if s := conf.Sizes; s != nil {
+ if a := s.Alignof(T); a >= 1 {
+ return a
+ }
+ panic("Config.Sizes.Alignof returned an alignment < 1")
+ }
+ return stdSizes.Alignof(T)
+}
+
+func (conf *Config) offsetsof(T *Struct) []int64 {
+ offsets := T.offsets
+ if offsets == nil && T.NumFields() > 0 {
+ // compute offsets on demand
+ if s := conf.Sizes; s != nil {
+ offsets = s.Offsetsof(T.fields)
+ // sanity checks
+ if len(offsets) != T.NumFields() {
+ panic("Config.Sizes.Offsetsof returned the wrong number of offsets")
+ }
+ for _, o := range offsets {
+ if o < 0 {
+ panic("Config.Sizes.Offsetsof returned an offset < 0")
+ }
+ }
+ } else {
+ offsets = stdSizes.Offsetsof(T.fields)
+ }
+ T.offsets = offsets
+ }
+ return offsets
+}
+
+// offsetof returns the offset of the field specified via
+// the index sequence relative to typ. All embedded fields
+// must be structs (rather than pointer to structs).
+func (conf *Config) offsetof(typ Type, index []int) int64 {
+ var o int64
+ for _, i := range index {
+ s := typ.Underlying().(*Struct)
+ o += conf.offsetsof(s)[i]
+ typ = s.fields[i].typ
+ }
+ return o
+}
+
+func (conf *Config) sizeof(T Type) int64 {
+ if s := conf.Sizes; s != nil {
+ if z := s.Sizeof(T); z >= 0 {
+ return z
+ }
+ panic("Config.Sizes.Sizeof returned a size < 0")
+ }
+ return stdSizes.Sizeof(T)
+}
+
+// align returns the smallest y >= x such that y % a == 0.
+func align(x, a int64) int64 {
+ y := x + a - 1
+ return y - y%a
+}
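A small sketch, not part of the patch, exercising StdSizes via the standard library's go/types, which exports the same type; the struct is built programmatically with NewField/NewStruct, and the nil *types.Package passed to NewField is just for illustration.

package main

import (
	"fmt"
	"go/token"
	"go/types"
)

func main() {
	sizes := &types.StdSizes{WordSize: 8, MaxAlign: 8}

	fields := []*types.Var{
		types.NewField(token.NoPos, nil, "a", types.Typ[types.Int8], false),
		types.NewField(token.NoPos, nil, "b", types.Typ[types.Int64], false),
		types.NewField(token.NoPos, nil, "c", types.Typ[types.Int16], false),
	}
	s := types.NewStruct(fields, nil)

	fmt.Println(sizes.Offsetsof(fields)) // [0 8 16]: b is aligned to 8, c to 2
	fmt.Println(sizes.Alignof(s))        // 8: the largest field alignment
	fmt.Println(sizes.Sizeof(s))         // last offset + last size per the rule above (newer releases may add trailing padding)
	fmt.Println(sizes.Sizeof(types.NewSlice(types.Typ[types.Int]))) // 3*WordSize = 24
}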
diff --git a/llgo/third_party/go.tools/go/types/stdlib_test.go b/llgo/third_party/go.tools/go/types/stdlib_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..5642e057b07fe3f26d30a7d6f96a2177211d8b1a
--- /dev/null
+++ b/llgo/third_party/go.tools/go/types/stdlib_test.go
@@ -0,0 +1,254 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file tests types.Check by using it to
+// typecheck the standard library and tests.
+
+package types_test
+
+import (
+ "fmt"
+ "go/ast"
+ "go/build"
+ "go/parser"
+ "go/scanner"
+ "go/token"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "testing"
+ "time"
+
+ _ "llvm.org/llgo/third_party/go.tools/go/gcimporter"
+ . "llvm.org/llgo/third_party/go.tools/go/types"
+)
+
+var (
+ pkgCount int // number of packages processed
+ start = time.Now()
+)
+
+func TestStdlib(t *testing.T) {
+ walkDirs(t, filepath.Join(runtime.GOROOT(), "src"))
+ if testing.Verbose() {
+ fmt.Println(pkgCount, "packages typechecked in", time.Since(start))
+ }
+}
+
+// firstComment returns the contents of the first comment in
+// the given file, assuming there's one within the first KB.
+func firstComment(filename string) string {
+ f, err := os.Open(filename)
+ if err != nil {
+ return ""
+ }
+ defer f.Close()
+
+ var src [1 << 10]byte // read at most 1KB
+ n, _ := f.Read(src[:])
+
+ var s scanner.Scanner
+ s.Init(fset.AddFile("", fset.Base(), n), src[:n], nil, scanner.ScanComments)
+ for {
+ _, tok, lit := s.Scan()
+ switch tok {
+ case token.COMMENT:
+ // remove trailing */ of multi-line comment
+ if lit[1] == '*' {
+ lit = lit[:len(lit)-2]
+ }
+ return strings.TrimSpace(lit[2:])
+ case token.EOF:
+ return ""
+ }
+ }
+}
+
+func testTestDir(t *testing.T, path string, ignore ...string) {
+ files, err := ioutil.ReadDir(path)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ excluded := make(map[string]bool)
+ for _, filename := range ignore {
+ excluded[filename] = true
+ }
+
+ fset := token.NewFileSet()
+ for _, f := range files {
+ // filter directory contents
+ if f.IsDir() || !strings.HasSuffix(f.Name(), ".go") || excluded[f.Name()] {
+ continue
+ }
+
+ // get per-file instructions
+ expectErrors := false
+ filename := filepath.Join(path, f.Name())
+ if cmd := firstComment(filename); cmd != "" {
+ switch cmd {
+ case "skip", "compiledir":
+ continue // ignore this file
+ case "errorcheck":
+ expectErrors = true
+ }
+ }
+
+ // parse and type-check file
+ file, err := parser.ParseFile(fset, filename, nil, 0)
+ if err == nil {
+ _, err = Check(filename, fset, []*ast.File{file})
+ }
+
+ if expectErrors {
+ if err == nil {
+ t.Errorf("expected errors but found none in %s", filename)
+ }
+ } else {
+ if err != nil {
+ t.Error(err)
+ }
+ }
+ }
+}
+
+func TestStdTest(t *testing.T) {
+ testTestDir(t, filepath.Join(runtime.GOROOT(), "test"),
+ "cmplxdivide.go", // also needs file cmplxdivide1.go - ignore
+ "sigchld.go", // don't work on Windows; testTestDir should consult build tags
+ "float_lit2.go", // TODO(gri) enable for releases 1.4 and higher
+ )
+}
+
+func TestStdFixed(t *testing.T) {
+ testTestDir(t, filepath.Join(runtime.GOROOT(), "test", "fixedbugs"),
+ "bug248.go", "bug302.go", "bug369.go", // complex test instructions - ignore
+ "bug459.go", // possibly incorrect test - see issue 6703 (pending spec clarification)
+ "issue3924.go", // possibly incorrect test - see issue 6671 (pending spec clarification)
+ "issue6889.go", // gc-specific test
+ )
+}
+
+func TestStdKen(t *testing.T) {
+ testTestDir(t, filepath.Join(runtime.GOROOT(), "test", "ken"))
+}
+
+// Package paths of excluded packages.
+var excluded = map[string]bool{
+ "builtin": true,
+}
+
+// typecheck typechecks the given package files.
+func typecheck(t *testing.T, path string, filenames []string) {
+ fset := token.NewFileSet()
+
+ // parse package files
+ var files []*ast.File
+ for _, filename := range filenames {
+ file, err := parser.ParseFile(fset, filename, nil, parser.AllErrors)
+ if err != nil {
+ // the parser error may be a list of individual errors; report them all
+ if list, ok := err.(scanner.ErrorList); ok {
+ for _, err := range list {
+ t.Error(err)
+ }
+ return
+ }
+ t.Error(err)
+ return
+ }
+
+ if testing.Verbose() {
+ if len(files) == 0 {
+ fmt.Println("package", file.Name.Name)
+ }
+ fmt.Println("\t", filename)
+ }
+
+ files = append(files, file)
+ }
+
+ // typecheck package files
+ var conf Config
+ conf.Error = func(err error) { t.Error(err) }
+ info := Info{Uses: make(map[*ast.Ident]Object)}
+ conf.Check(path, fset, files, &info)
+ pkgCount++
+
+ // Perform checks of API invariants.
+
+ // All Objects have a package, except predeclared ones.
+ errorError := Universe.Lookup("error").Type().Underlying().(*Interface).ExplicitMethod(0) // (error).Error
+ for id, obj := range info.Uses {
+ predeclared := obj == Universe.Lookup(obj.Name()) || obj == errorError
+ if predeclared == (obj.Pkg() != nil) {
+ posn := fset.Position(id.Pos())
+ if predeclared {
+ t.Errorf("%s: predeclared object with package: %s", posn, obj)
+ } else {
+ t.Errorf("%s: user-defined object without package: %s", posn, obj)
+ }
+ }
+ }
+}
+
+// pkgFilenames returns the list of package filenames for the given directory.
+func pkgFilenames(dir string) ([]string, error) {
+ ctxt := build.Default
+ ctxt.CgoEnabled = false
+ pkg, err := ctxt.ImportDir(dir, 0)
+ if err != nil {
+ if _, nogo := err.(*build.NoGoError); nogo {
+ return nil, nil // no *.go files, not an error
+ }
+ return nil, err
+ }
+ if excluded[pkg.ImportPath] {
+ return nil, nil
+ }
+ var filenames []string
+ for _, name := range pkg.GoFiles {
+ filenames = append(filenames, filepath.Join(pkg.Dir, name))
+ }
+ for _, name := range pkg.TestGoFiles {
+ filenames = append(filenames, filepath.Join(pkg.Dir, name))
+ }
+ return filenames, nil
+}
+
+// Note: Could use filepath.Walk instead of walkDirs but that wouldn't
+// necessarily be shorter or clearer after adding the code to
+// terminate early for -short tests.
+
+func walkDirs(t *testing.T, dir string) {
+ // limit run time for short tests
+ if testing.Short() && time.Since(start) >= 750*time.Millisecond {
+ return
+ }
+
+ fis, err := ioutil.ReadDir(dir)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ // typecheck package in directory
+ files, err := pkgFilenames(dir)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ if files != nil {
+ typecheck(t, dir, files)
+ }
+
+ // traverse subdirectories, but don't walk into testdata
+ for _, fi := range fis {
+ if fi.IsDir() && fi.Name() != "testdata" {
+ walkDirs(t, filepath.Join(dir, fi.Name()))
+ }
+ }
+}
diff --git a/llgo/third_party/go.tools/go/types/stmt.go b/llgo/third_party/go.tools/go/types/stmt.go
new file mode 100644
index 0000000000000000000000000000000000000000..229382ee4f1d1e861df8a0162479f4c6daeae3ba
--- /dev/null
+++ b/llgo/third_party/go.tools/go/types/stmt.go
@@ -0,0 +1,729 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements typechecking of statements.
+
+package types
+
+import (
+ "fmt"
+ "go/ast"
+ "go/token"
+
+ "llvm.org/llgo/third_party/go.tools/go/exact"
+)
+
+func (check *Checker) funcBody(decl *declInfo, name string, sig *Signature, body *ast.BlockStmt) {
+ if trace {
+ if name == "" {
+ name = ""
+ }
+ fmt.Printf("--- %s: %s {\n", name, sig)
+ defer fmt.Println("--- ")
+ }
+
+ // save/restore current context and setup function context
+ // (and use 0 indentation at function start)
+ defer func(ctxt context, indent int) {
+ check.context = ctxt
+ check.indent = indent
+ }(check.context, check.indent)
+ check.context = context{
+ decl: decl,
+ scope: sig.scope,
+ sig: sig,
+ }
+ check.indent = 0
+
+ check.stmtList(0, body.List)
+
+ if check.hasLabel {
+ check.labels(body)
+ }
+
+ if sig.results.Len() > 0 && !check.isTerminating(body, "") {
+ check.error(body.Rbrace, "missing return")
+ }
+
+ // spec: "Implementation restriction: A compiler may make it illegal to
+ // declare a variable inside a function body if the variable is never used."
+ // (One could check each scope after use, but that distributes this check
+ // over several places because CloseScope is not always called explicitly.)
+ check.usage(sig.scope)
+}
+
+func (check *Checker) usage(scope *Scope) {
+ for _, obj := range scope.elems {
+ if v, _ := obj.(*Var); v != nil && !v.used {
+ check.softErrorf(v.pos, "%s declared but not used", v.name)
+ }
+ }
+ for _, scope := range scope.children {
+ check.usage(scope)
+ }
+}
+
+// stmtContext is a bitset describing which
+// control-flow statements are permissible.
+type stmtContext uint
+
+const (
+ breakOk stmtContext = 1 << iota
+ continueOk
+ fallthroughOk
+)
+
+func (check *Checker) simpleStmt(s ast.Stmt) {
+ if s != nil {
+ check.stmt(0, s)
+ }
+}
+
+func (check *Checker) stmtList(ctxt stmtContext, list []ast.Stmt) {
+ ok := ctxt&fallthroughOk != 0
+ inner := ctxt &^ fallthroughOk
+ for i, s := range list {
+ inner := inner
+ if ok && i+1 == len(list) {
+ inner |= fallthroughOk
+ }
+ check.stmt(inner, s)
+ }
+}
+
+func (check *Checker) multipleDefaults(list []ast.Stmt) {
+ var first ast.Stmt
+ for _, s := range list {
+ var d ast.Stmt
+ switch c := s.(type) {
+ case *ast.CaseClause:
+ if len(c.List) == 0 {
+ d = s
+ }
+ case *ast.CommClause:
+ if c.Comm == nil {
+ d = s
+ }
+ default:
+ check.invalidAST(s.Pos(), "case/communication clause expected")
+ }
+ if d != nil {
+ if first != nil {
+ check.errorf(d.Pos(), "multiple defaults (first at %s)", first.Pos())
+ } else {
+ first = d
+ }
+ }
+ }
+}
+
+func (check *Checker) openScope(s ast.Stmt, comment string) {
+ scope := NewScope(check.scope, comment)
+ check.recordScope(s, scope)
+ check.scope = scope
+}
+
+func (check *Checker) closeScope() {
+ check.scope = check.scope.Parent()
+}
+
+func assignOp(op token.Token) token.Token {
+ // token_test.go verifies the token ordering this function relies on
+ if token.ADD_ASSIGN <= op && op <= token.AND_NOT_ASSIGN {
+ return op + (token.ADD - token.ADD_ASSIGN)
+ }
+ return token.ILLEGAL
+}
+
+func (check *Checker) suspendedCall(keyword string, call *ast.CallExpr) {
+ var x operand
+ var msg string
+ switch check.rawExpr(&x, call, nil) {
+ case conversion:
+ msg = "requires function call, not conversion"
+ case expression:
+ msg = "discards result of"
+ case statement:
+ return
+ default:
+ unreachable()
+ }
+ check.errorf(x.pos(), "%s %s %s", keyword, msg, &x)
+}
+
+func (check *Checker) caseValues(x operand /* copy argument (not *operand!) */, values []ast.Expr) {
+ // No duplicate checking for now. See issue 4524.
+ for _, e := range values {
+ var y operand
+ check.expr(&y, e)
+ if y.mode == invalid {
+ return
+ }
+ // TODO(gri) The convertUntyped call pair below appears in other places. Factor!
+ // Order matters: By comparing y against x, error positions are at the case values.
+ check.convertUntyped(&y, x.typ)
+ if y.mode == invalid {
+ return
+ }
+ check.convertUntyped(&x, y.typ)
+ if x.mode == invalid {
+ return
+ }
+ check.comparison(&y, &x, token.EQL)
+ }
+}
+
+func (check *Checker) caseTypes(x *operand, xtyp *Interface, types []ast.Expr, seen map[Type]token.Pos) (T Type) {
+L:
+ for _, e := range types {
+ T = check.typOrNil(e)
+ if T == Typ[Invalid] {
+ continue
+ }
+ // complain about duplicate types
+ // TODO(gri) use a type hash to avoid quadratic algorithm
+ for t, pos := range seen {
+ if T == nil && t == nil || T != nil && t != nil && Identical(T, t) {
+ // talk about "case" rather than "type" because of nil case
+ check.error(e.Pos(), "duplicate case in type switch")
+ check.errorf(pos, "\tprevious case %s", T) // secondary error, \t indented
+ continue L
+ }
+ }
+ seen[T] = e.Pos()
+ if T != nil {
+ check.typeAssertion(e.Pos(), x, xtyp, T)
+ }
+ }
+ return
+}
+
+// stmt typechecks statement s.
+func (check *Checker) stmt(ctxt stmtContext, s ast.Stmt) {
+ // statements cannot use iota in general
+ // (constant declarations set it explicitly)
+ assert(check.iota == nil)
+
+ // statements must end with the same top scope as they started with
+ if debug {
+ defer func(scope *Scope) {
+ // don't check if code is panicking
+ if p := recover(); p != nil {
+ panic(p)
+ }
+ assert(scope == check.scope)
+ }(check.scope)
+ }
+
+ inner := ctxt &^ fallthroughOk
+ switch s := s.(type) {
+ case *ast.BadStmt, *ast.EmptyStmt:
+ // ignore
+
+ case *ast.DeclStmt:
+ check.declStmt(s.Decl)
+
+ case *ast.LabeledStmt:
+ check.hasLabel = true
+ check.stmt(ctxt, s.Stmt)
+
+ case *ast.ExprStmt:
+ // spec: "With the exception of specific built-in functions,
+ // function and method calls and receive operations can appear
+ // in statement context. Such statements may be parenthesized."
+ var x operand
+ kind := check.rawExpr(&x, s.X, nil)
+ var msg string
+ switch x.mode {
+ default:
+ if kind == statement {
+ return
+ }
+ msg = "is not used"
+ case builtin:
+ msg = "must be called"
+ case typexpr:
+ msg = "is not an expression"
+ }
+ check.errorf(x.pos(), "%s %s", &x, msg)
+
+ case *ast.SendStmt:
+ var ch, x operand
+ check.expr(&ch, s.Chan)
+ check.expr(&x, s.Value)
+ if ch.mode == invalid || x.mode == invalid {
+ return
+ }
+ if tch, ok := ch.typ.Underlying().(*Chan); !ok || tch.dir == RecvOnly || !check.assignment(&x, tch.elem) {
+ if x.mode != invalid {
+ check.invalidOp(ch.pos(), "cannot send %s to channel %s", &x, &ch)
+ }
+ }
+
+ case *ast.IncDecStmt:
+ var op token.Token
+ switch s.Tok {
+ case token.INC:
+ op = token.ADD
+ case token.DEC:
+ op = token.SUB
+ default:
+ check.invalidAST(s.TokPos, "unknown inc/dec operation %s", s.Tok)
+ return
+ }
+ var x operand
+ Y := &ast.BasicLit{ValuePos: s.X.Pos(), Kind: token.INT, Value: "1"} // use x's position
+ check.binary(&x, s.X, Y, op)
+ if x.mode == invalid {
+ return
+ }
+ check.assignVar(s.X, &x)
+
+ case *ast.AssignStmt:
+ switch s.Tok {
+ case token.ASSIGN, token.DEFINE:
+ if len(s.Lhs) == 0 {
+ check.invalidAST(s.Pos(), "missing lhs in assignment")
+ return
+ }
+ if s.Tok == token.DEFINE {
+ check.shortVarDecl(s.TokPos, s.Lhs, s.Rhs)
+ } else {
+ // regular assignment
+ check.assignVars(s.Lhs, s.Rhs)
+ }
+
+ default:
+ // assignment operations
+ if len(s.Lhs) != 1 || len(s.Rhs) != 1 {
+ check.errorf(s.TokPos, "assignment operation %s requires single-valued expressions", s.Tok)
+ return
+ }
+ op := assignOp(s.Tok)
+ if op == token.ILLEGAL {
+ check.invalidAST(s.TokPos, "unknown assignment operation %s", s.Tok)
+ return
+ }
+ var x operand
+ check.binary(&x, s.Lhs[0], s.Rhs[0], op)
+ if x.mode == invalid {
+ return
+ }
+ check.assignVar(s.Lhs[0], &x)
+ }
+
+ case *ast.GoStmt:
+ check.suspendedCall("go", s.Call)
+
+ case *ast.DeferStmt:
+ check.suspendedCall("defer", s.Call)
+
+ case *ast.ReturnStmt:
+ res := check.sig.results
+ if res.Len() > 0 {
+ // function returns results
+ // (if one, say the first, result parameter is named, all of them are named)
+ if len(s.Results) == 0 && res.vars[0].name != "" {
+ // spec: "Implementation restriction: A compiler may disallow an empty expression
+ // list in a "return" statement if a different entity (constant, type, or variable)
+ // with the same name as a result parameter is in scope at the place of the return."
+ for _, obj := range res.vars {
+ if _, alt := check.scope.LookupParent(obj.name); alt != nil && alt != obj {
+ check.errorf(s.Pos(), "result parameter %s not in scope at return", obj.name)
+ check.errorf(alt.Pos(), "\tinner declaration of %s", obj)
+ // ok to continue
+ }
+ }
+ } else {
+ // return has results or result parameters are unnamed
+ check.initVars(res.vars, s.Results, s.Return)
+ }
+ } else if len(s.Results) > 0 {
+ check.error(s.Results[0].Pos(), "no result values expected")
+ check.use(s.Results...)
+ }
+
+ case *ast.BranchStmt:
+ if s.Label != nil {
+ check.hasLabel = true
+ return // checked in 2nd pass (check.labels)
+ }
+ switch s.Tok {
+ case token.BREAK:
+ if ctxt&breakOk == 0 {
+ check.error(s.Pos(), "break not in for, switch, or select statement")
+ }
+ case token.CONTINUE:
+ if ctxt&continueOk == 0 {
+ check.error(s.Pos(), "continue not in for statement")
+ }
+ case token.FALLTHROUGH:
+ if ctxt&fallthroughOk == 0 {
+ check.error(s.Pos(), "fallthrough statement out of place")
+ }
+ default:
+ check.invalidAST(s.Pos(), "branch statement: %s", s.Tok)
+ }
+
+ case *ast.BlockStmt:
+ check.openScope(s, "block")
+ defer check.closeScope()
+
+ check.stmtList(inner, s.List)
+
+ case *ast.IfStmt:
+ check.openScope(s, "if")
+ defer check.closeScope()
+
+ check.simpleStmt(s.Init)
+ var x operand
+ check.expr(&x, s.Cond)
+ if x.mode != invalid && !isBoolean(x.typ) {
+ check.error(s.Cond.Pos(), "non-boolean condition in if statement")
+ }
+ check.stmt(inner, s.Body)
+ if s.Else != nil {
+ check.stmt(inner, s.Else)
+ }
+
+ case *ast.SwitchStmt:
+ inner |= breakOk
+ check.openScope(s, "switch")
+ defer check.closeScope()
+
+ check.simpleStmt(s.Init)
+ var x operand
+ if s.Tag != nil {
+ check.expr(&x, s.Tag)
+ } else {
+ // spec: "A missing switch expression is
+ // equivalent to the boolean value true."
+ x.mode = constant
+ x.typ = Typ[Bool]
+ x.val = exact.MakeBool(true)
+ x.expr = &ast.Ident{NamePos: s.Body.Lbrace, Name: "true"}
+ }
+
+ check.multipleDefaults(s.Body.List)
+
+ for i, c := range s.Body.List {
+ clause, _ := c.(*ast.CaseClause)
+ if clause == nil {
+ check.invalidAST(c.Pos(), "incorrect expression switch case")
+ continue
+ }
+ if x.mode != invalid {
+ check.caseValues(x, clause.List)
+ }
+ check.openScope(clause, "case")
+ inner := inner
+ if i+1 < len(s.Body.List) {
+ inner |= fallthroughOk
+ }
+ check.stmtList(inner, clause.Body)
+ check.closeScope()
+ }
+
+ case *ast.TypeSwitchStmt:
+ inner |= breakOk
+ check.openScope(s, "type switch")
+ defer check.closeScope()
+
+ check.simpleStmt(s.Init)
+
+ // A type switch guard must be of the form:
+ //
+ // TypeSwitchGuard = [ identifier ":=" ] PrimaryExpr "." "(" "type" ")" .
+ //
+ // The parser is checking syntactic correctness;
+ // remaining syntactic errors are considered AST errors here.
+ // TODO(gri) better factoring of error handling (invalid ASTs)
+ //
+ var lhs *ast.Ident // lhs identifier or nil
+ var rhs ast.Expr
+ switch guard := s.Assign.(type) {
+ case *ast.ExprStmt:
+ rhs = guard.X
+ case *ast.AssignStmt:
+ if len(guard.Lhs) != 1 || guard.Tok != token.DEFINE || len(guard.Rhs) != 1 {
+ check.invalidAST(s.Pos(), "incorrect form of type switch guard")
+ return
+ }
+
+ lhs, _ = guard.Lhs[0].(*ast.Ident)
+ if lhs == nil {
+ check.invalidAST(s.Pos(), "incorrect form of type switch guard")
+ return
+ }
+ check.recordDef(lhs, nil) // lhs variable is implicitly declared in each case clause
+
+ rhs = guard.Rhs[0]
+
+ default:
+ check.invalidAST(s.Pos(), "incorrect form of type switch guard")
+ return
+ }
+
+ // rhs must be of the form: expr.(type) and expr must be an interface
+ expr, _ := rhs.(*ast.TypeAssertExpr)
+ if expr == nil || expr.Type != nil {
+ check.invalidAST(s.Pos(), "incorrect form of type switch guard")
+ return
+ }
+ var x operand
+ check.expr(&x, expr.X)
+ if x.mode == invalid {
+ return
+ }
+ xtyp, _ := x.typ.Underlying().(*Interface)
+ if xtyp == nil {
+ check.errorf(x.pos(), "%s is not an interface", &x)
+ return
+ }
+
+ check.multipleDefaults(s.Body.List)
+
+ var lhsVars []*Var // list of implicitly declared lhs variables
+ seen := make(map[Type]token.Pos) // map of seen types to positions
+ for _, s := range s.Body.List {
+ clause, _ := s.(*ast.CaseClause)
+ if clause == nil {
+ check.invalidAST(s.Pos(), "incorrect type switch case")
+ continue
+ }
+ // Check each type in this type switch case.
+ T := check.caseTypes(&x, xtyp, clause.List, seen)
+ check.openScope(clause, "case")
+ // If lhs exists, declare a corresponding variable in the case-local scope.
+ if lhs != nil {
+ // spec: "The TypeSwitchGuard may include a short variable declaration.
+ // When that form is used, the variable is declared at the beginning of
+ // the implicit block in each clause. In clauses with a case listing
+ // exactly one type, the variable has that type; otherwise, the variable
+ // has the type of the expression in the TypeSwitchGuard."
+ if len(clause.List) != 1 || T == nil {
+ T = x.typ
+ }
+ obj := NewVar(lhs.Pos(), check.pkg, lhs.Name, T)
+ check.declare(check.scope, nil, obj)
+ check.recordImplicit(clause, obj)
+ // For the "declared but not used" error, all lhs variables act as
+ // one; i.e., if any one of them is 'used', all of them are 'used'.
+ // Collect them for later analysis.
+ lhsVars = append(lhsVars, obj)
+ }
+ check.stmtList(inner, clause.Body)
+ check.closeScope()
+ }
+
+ // If lhs exists, we must have at least one lhs variable that was used.
+ if lhs != nil {
+ var used bool
+ for _, v := range lhsVars {
+ if v.used {
+ used = true
+ }
+ v.used = true // avoid usage error when checking entire function
+ }
+ if !used {
+ check.softErrorf(lhs.Pos(), "%s declared but not used", lhs.Name)
+ }
+ }
+
+ case *ast.SelectStmt:
+ inner |= breakOk
+
+ check.multipleDefaults(s.Body.List)
+
+ for _, s := range s.Body.List {
+ clause, _ := s.(*ast.CommClause)
+ if clause == nil {
+ continue // error reported before
+ }
+
+ // clause.Comm must be a SendStmt, RecvStmt, or default case
+ valid := false
+ var rhs ast.Expr // rhs of RecvStmt, or nil
+ switch s := clause.Comm.(type) {
+ case nil, *ast.SendStmt:
+ valid = true
+ case *ast.AssignStmt:
+ if len(s.Rhs) == 1 {
+ rhs = s.Rhs[0]
+ }
+ case *ast.ExprStmt:
+ rhs = s.X
+ }
+
+ // if present, rhs must be a receive operation
+ if rhs != nil {
+ if x, _ := unparen(rhs).(*ast.UnaryExpr); x != nil && x.Op == token.ARROW {
+ valid = true
+ }
+ }
+
+ if !valid {
+ check.error(clause.Comm.Pos(), "select case must be send or receive (possibly with assignment)")
+ continue
+ }
+
+ check.openScope(s, "case")
+ defer check.closeScope()
+ if clause.Comm != nil {
+ check.stmt(inner, clause.Comm)
+ }
+ check.stmtList(inner, clause.Body)
+ }
+
+ case *ast.ForStmt:
+ inner |= breakOk | continueOk
+ check.openScope(s, "for")
+ defer check.closeScope()
+
+ check.simpleStmt(s.Init)
+ if s.Cond != nil {
+ var x operand
+ check.expr(&x, s.Cond)
+ if x.mode != invalid && !isBoolean(x.typ) {
+ check.error(s.Cond.Pos(), "non-boolean condition in for statement")
+ }
+ }
+ check.simpleStmt(s.Post)
+ // spec: "The init statement may be a short variable
+ // declaration, but the post statement must not."
+ if s, _ := s.Post.(*ast.AssignStmt); s != nil && s.Tok == token.DEFINE {
+ check.softErrorf(s.Pos(), "cannot declare in post statement")
+ check.use(s.Lhs...) // avoid follow-up errors
+ }
+ check.stmt(inner, s.Body)
+
+ case *ast.RangeStmt:
+ inner |= breakOk | continueOk
+ check.openScope(s, "for")
+ defer check.closeScope()
+
+ // check expression to iterate over
+ decl := s.Tok == token.DEFINE
+ var x operand
+ check.expr(&x, s.X)
+ if x.mode == invalid {
+ // if we don't have a declaration, we can still check the loop's body
+ // (otherwise we can't because we are missing the declared variables)
+ if !decl {
+ check.stmt(inner, s.Body)
+ }
+ return
+ }
+
+ // determine key/value types
+ var key, val Type
+ switch typ := x.typ.Underlying().(type) {
+ case *Basic:
+ if isString(typ) {
+ key = Typ[Int]
+ val = UniverseRune // use 'rune' name
+ }
+ case *Array:
+ key = Typ[Int]
+ val = typ.elem
+ case *Slice:
+ key = Typ[Int]
+ val = typ.elem
+ case *Pointer:
+ if typ, _ := typ.base.Underlying().(*Array); typ != nil {
+ key = Typ[Int]
+ val = typ.elem
+ }
+ case *Map:
+ key = typ.key
+ val = typ.elem
+ case *Chan:
+ key = typ.elem
+ val = Typ[Invalid]
+ if typ.dir == SendOnly {
+ check.errorf(x.pos(), "cannot range over send-only channel %s", &x)
+ // ok to continue
+ }
+ if s.Value != nil {
+ check.errorf(s.Value.Pos(), "iteration over %s permits only one iteration variable", &x)
+ // ok to continue
+ }
+ }
+
+ if key == nil {
+ check.errorf(x.pos(), "cannot range over %s", &x)
+ // if we don't have a declaration, we can still check the loop's body
+ if !decl {
+ check.stmt(inner, s.Body)
+ }
+ return
+ }
+
+ // check assignment to/declaration of iteration variables
+ // (irregular assignment, cannot easily map to existing assignment checks)
+
+ // lhs expressions and initialization value (rhs) types
+ lhs := [2]ast.Expr{s.Key, s.Value}
+ rhs := [2]Type{key, val}
+
+ if decl {
+ // short variable declaration; variable scope starts after the range clause
+ // (the for loop opens a new scope, so variables on the lhs never redeclare
+ // previously declared variables)
+ var vars []*Var
+ for i, lhs := range lhs {
+ if lhs == nil {
+ continue
+ }
+
+ // determine lhs variable
+ var obj *Var
+ if ident, _ := lhs.(*ast.Ident); ident != nil {
+ // declare new variable
+ name := ident.Name
+ obj = NewVar(ident.Pos(), check.pkg, name, nil)
+ check.recordDef(ident, obj)
+ // _ variables don't count as new variables
+ if name != "_" {
+ vars = append(vars, obj)
+ }
+ } else {
+ check.errorf(lhs.Pos(), "cannot declare %s", lhs)
+ obj = NewVar(lhs.Pos(), check.pkg, "_", nil) // dummy variable
+ }
+
+ // initialize lhs variable
+ x.mode = value
+ x.expr = lhs // we don't have a better rhs expression to use here
+ x.typ = rhs[i]
+ check.initVar(obj, &x, false)
+ }
+
+ // declare variables
+ if len(vars) > 0 {
+ for _, obj := range vars {
+ check.declare(check.scope, nil /* recordDef already called */, obj)
+ }
+ } else {
+ check.error(s.TokPos, "no new variables on left side of :=")
+ }
+ } else {
+ // ordinary assignment
+ for i, lhs := range lhs {
+ if lhs == nil {
+ continue
+ }
+ x.mode = value
+ x.expr = lhs // we don't have a better rhs expression to use here
+ x.typ = rhs[i]
+ check.assignVar(lhs, &x)
+ }
+ }
+
+ check.stmt(inner, s.Body)
+
+ default:
+ check.error(s.Pos(), "invalid statement")
+ }
+}
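
Aside for readers of the checker code above: the TypeSwitchGuard rule quoted from the spec is easiest to see with a concrete, ordinary Go example (this is illustration only, not part of the patched sources). The variable declared in the guard has the single listed type in a one-type case and the guard expression's type everywhere else, and it is reported as unused only if no clause uses it:

    package main

    import "fmt"

    func describe(x interface{}) {
        switch v := x.(type) {
        case int:
            // exactly one type listed: v has type int in this clause
            fmt.Println("int:", v*2)
        case string, error:
            // more than one type listed: v has the type of x (interface{})
            fmt.Printf("string or error: %v\n", v)
        default:
            // default clause: v again has the type of x;
            // "v declared but not used" is reported only if no clause uses v
            _ = v
        }
    }

    func main() {
        describe(3)
        describe("hi")
    }
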
diff --git a/llgo/third_party/go.tools/go/types/testdata/blank.src b/llgo/third_party/go.tools/go/types/testdata/blank.src
new file mode 100644
index 0000000000000000000000000000000000000000..6a2507f482e9a88af436d75bfa9c4fd5f4361051
--- /dev/null
+++ b/llgo/third_party/go.tools/go/types/testdata/blank.src
@@ -0,0 +1,5 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package _ /* ERROR invalid package name */
diff --git a/llgo/third_party/go.tools/go/types/testdata/builtins.src b/llgo/third_party/go.tools/go/types/testdata/builtins.src
new file mode 100644
index 0000000000000000000000000000000000000000..8b405c3f928dcf11f68f17950a869577ffc055b2
--- /dev/null
+++ b/llgo/third_party/go.tools/go/types/testdata/builtins.src
@@ -0,0 +1,881 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// builtin calls
+
+package builtins
+
+import "unsafe"
+
+func f0() {}
+
+func append1() {
+ var b byte
+ var x int
+ var s []byte
+ _ = append() // ERROR not enough arguments
+ _ = append("foo" /* ERROR not a slice */ )
+ _ = append(nil /* ERROR not a slice */ , s)
+ _ = append(x /* ERROR not a slice */ , s)
+ _ = append(s)
+ append /* ERROR not used */ (s)
+
+ _ = append(s, b)
+ _ = append(s, x /* ERROR cannot pass argument x */ )
+ _ = append(s, s /* ERROR cannot pass argument s */ )
+ _ = append(s /* ERROR can only use ... with matching parameter */ ...)
+ _ = append(s, b, s /* ERROR can only use ... with matching parameter */ ...)
+ _ = append(s, 1, 2, 3)
+ _ = append(s, 1, 2, 3, x /* ERROR cannot pass argument x */ , 5, 6, 6)
+ _ = append(s, 1, 2, s /* ERROR can only use ... with matching parameter */ ...)
+ _ = append([]interface{}(nil), 1, 2, "foo", x, 3.1425, false)
+
+ type S []byte
+ type T string
+ var t T
+ _ = append(s, "foo" /* ERROR cannot convert */ )
+ _ = append(s, "foo"...)
+ _ = append(S(s), "foo" /* ERROR cannot convert */ )
+ _ = append(S(s), "foo"...)
+ _ = append(s, t /* ERROR cannot pass argument t */ )
+ _ = append(s, t...)
+ _ = append(s, T("foo")...)
+ _ = append(S(s), t /* ERROR cannot pass argument t */ )
+ _ = append(S(s), t...)
+ _ = append(S(s), T("foo")...)
+ _ = append([]string{}, t /* ERROR cannot pass argument t */ , "foo")
+ _ = append([]T{}, t, "foo")
+}
+
+// from the spec
+func append2() {
+ s0 := []int{0, 0}
+ s1 := append(s0, 2) // append a single element s1 == []int{0, 0, 2}
+ s2 := append(s1, 3, 5, 7) // append multiple elements s2 == []int{0, 0, 2, 3, 5, 7}
+ s3 := append(s2, s0...) // append a slice s3 == []int{0, 0, 2, 3, 5, 7, 0, 0}
+ s4 := append(s3[3:6], s3[2:]...) // append overlapping slice s4 == []int{3, 5, 7, 2, 3, 5, 7, 0, 0}
+
+ var t []interface{}
+ t = append(t, 42, 3.1415, "foo") // t == []interface{}{42, 3.1415, "foo"}
+
+ var b []byte
+ b = append(b, "bar"...) // append string contents b == []byte{'b', 'a', 'r' }
+
+ _ = s4
+}
+
+func append3() {
+ f1 := func() (s []int) { return }
+ f2 := func() (s []int, x int) { return }
+ f3 := func() (s []int, x, y int) { return }
+ f5 := func() (s []interface{}, x int, y float32, z string, b bool) { return }
+ ff := func() (int, float32) { return 0, 0 }
+ _ = append(f0 /* ERROR used as value */ ())
+ _ = append(f1())
+ _ = append(f2())
+ _ = append(f3())
+ _ = append(f5())
+ _ = append(ff /* ERROR not a slice */ ()) // TODO(gri) better error message
+}
+
+func cap1() {
+ var a [10]bool
+ var p *[20]int
+ var c chan string
+ _ = cap() // ERROR not enough arguments
+ _ = cap(1, 2) // ERROR too many arguments
+ _ = cap(42 /* ERROR invalid */)
+ const _3 = cap(a)
+ assert(_3 == 10)
+ const _4 = cap(p)
+ assert(_4 == 20)
+ _ = cap(c)
+ cap /* ERROR not used */ (c)
+
+ // issue 4744
+ type T struct{ a [10]int }
+ const _ = cap(((*T)(nil)).a)
+
+ var s [][]byte
+ _ = cap(s)
+ _ = cap(s... /* ERROR invalid use of \.\.\. */ )
+}
+
+func cap2() {
+ f1a := func() (a [10]int) { return }
+ f1s := func() (s []int) { return }
+ f2 := func() (s []int, x int) { return }
+ _ = cap(f0 /* ERROR used as value */ ())
+ _ = cap(f1a())
+ _ = cap(f1s())
+ _ = cap(f2()) // ERROR too many arguments
+}
+
+// test cases for issue 7387
+func cap3() {
+ var f = func() int { return 0 }
+ var x = f()
+ const (
+ _ = cap([4]int{})
+ _ = cap([4]int{x})
+ _ = cap /* ERROR not constant */ ([4]int{f()})
+ _ = cap /* ERROR not constant */ ([4]int{cap([]int{})})
+ _ = cap([4]int{cap([4]int{})})
+ )
+ var y float64
+ var z complex128
+ const (
+ _ = cap([4]float64{})
+ _ = cap([4]float64{y})
+ _ = cap([4]float64{real(2i)})
+ _ = cap /* ERROR not constant */ ([4]float64{real(z)})
+ )
+ var ch chan [10]int
+ const (
+ _ = cap /* ERROR not constant */ (<-ch)
+ _ = cap /* ERROR not constant */ ([4]int{(<-ch)[0]})
+ )
+}
+
+func close1() {
+ var c chan int
+ var r <-chan int
+ close() // ERROR not enough arguments
+ close(1, 2) // ERROR too many arguments
+ close(42 /* ERROR not a channel */)
+ close(r /* ERROR receive-only channel */)
+ close(c)
+ _ = close /* ERROR used as value */ (c)
+
+ var s []chan int
+ close(s... /* ERROR invalid use of \.\.\. */ )
+}
+
+func close2() {
+ f1 := func() (ch chan int) { return }
+ f2 := func() (ch chan int, x int) { return }
+ close(f0 /* ERROR used as value */ ())
+ close(f1())
+ close(f2()) // ERROR too many arguments
+}
+
+func complex1() {
+ var i32 int32
+ var f32 float32
+ var f64 float64
+ var c64 complex64
+ var c128 complex128
+ _ = complex() // ERROR not enough arguments
+ _ = complex(1) // ERROR not enough arguments
+ _ = complex(true /* ERROR invalid argument */ , 0)
+ _ = complex(i32 /* ERROR invalid argument */ , 0)
+ _ = complex("foo" /* ERROR invalid argument */ , 0)
+ _ = complex(c64 /* ERROR invalid argument */ , 0)
+ _ = complex(0, true /* ERROR invalid argument */ )
+ _ = complex(0, i32 /* ERROR invalid argument */ )
+ _ = complex(0, "foo" /* ERROR invalid argument */ )
+ _ = complex(0, c64 /* ERROR invalid argument */ )
+ _ = complex(f32, f32)
+ _ = complex(f32, 1)
+ _ = complex(f32, 1.0)
+ _ = complex(f32, 'a')
+ _ = complex(f64, f64)
+ _ = complex(f64, 1)
+ _ = complex(f64, 1.0)
+ _ = complex(f64, 'a')
+ _ = complex(f32 /* ERROR mismatched types */ , f64)
+ _ = complex(f64 /* ERROR mismatched types */ , f32)
+ _ = complex(1, 1)
+ _ = complex(1, 1.1)
+ _ = complex(1, 'a')
+ complex /* ERROR not used */ (1, 2)
+
+ var _ complex64 = complex(f32, f32)
+ var _ complex64 = complex /* ERROR cannot initialize */ (f64, f64)
+
+ var _ complex128 = complex /* ERROR cannot initialize */ (f32, f32)
+ var _ complex128 = complex(f64, f64)
+
+ // untyped constants
+ const _ int = complex(1, 0)
+ const _ float32 = complex(1, 0)
+ const _ complex64 = complex(1, 0)
+ const _ complex128 = complex(1, 0)
+
+ const _ int = complex /* ERROR int */ (1.1, 0)
+ const _ float32 = complex /* ERROR float32 */ (1, 2)
+
+ // untyped values
+ var s uint
+ _ = complex(1 /* ERROR integer */ <<s, 0)
+ logSizeofInt = uint(mi>>8&1 + mi>>16&1 + mi>>32&1)
+ logSizeofUint = uint(mu>>8&1 + mu>>16&1 + mu>>32&1)
+ logSizeofUintptr = uint(mp>>8&1 + mp>>16&1 + mp>>32&1)
+)
+
+const (
+ minInt8 = -1<<(8<
+ _ = assert(smallestFloat32 > 0)
+ _ = assert(smallestFloat64 > 0)
+)
+
+const (
+ maxFloat32 = 1<<127 * (1<<24 - 1) / (1.0<<23)
+ maxFloat64 = 1<<1023 * (1<<53 - 1) / (1.0<<52)
+)
+
+const (
+ _ int8 = minInt8 /* ERROR "overflows" */ - 1
+ _ int8 = minInt8
+ _ int8 = maxInt8
+ _ int8 = maxInt8 /* ERROR "overflows" */ + 1
+ _ int8 = smallestFloat64 /* ERROR "truncated" */
+
+ _ = int8(minInt8 /* ERROR "cannot convert" */ - 1)
+ _ = int8(minInt8)
+ _ = int8(maxInt8)
+ _ = int8(maxInt8 /* ERROR "cannot convert" */ + 1)
+ _ = int8(smallestFloat64 /* ERROR "cannot convert" */)
+)
+
+const (
+ _ int16 = minInt16 /* ERROR "overflows" */ - 1
+ _ int16 = minInt16
+ _ int16 = maxInt16
+ _ int16 = maxInt16 /* ERROR "overflows" */ + 1
+ _ int16 = smallestFloat64 /* ERROR "truncated" */
+
+ _ = int16(minInt16 /* ERROR "cannot convert" */ - 1)
+ _ = int16(minInt16)
+ _ = int16(maxInt16)
+ _ = int16(maxInt16 /* ERROR "cannot convert" */ + 1)
+ _ = int16(smallestFloat64 /* ERROR "cannot convert" */)
+)
+
+const (
+ _ int32 = minInt32 /* ERROR "overflows" */ - 1
+ _ int32 = minInt32
+ _ int32 = maxInt32
+ _ int32 = maxInt32 /* ERROR "overflows" */ + 1
+ _ int32 = smallestFloat64 /* ERROR "truncated" */
+
+ _ = int32(minInt32 /* ERROR "cannot convert" */ - 1)
+ _ = int32(minInt32)
+ _ = int32(maxInt32)
+ _ = int32(maxInt32 /* ERROR "cannot convert" */ + 1)
+ _ = int32(smallestFloat64 /* ERROR "cannot convert" */)
+)
+
+const (
+ _ int64 = minInt64 /* ERROR "overflows" */ - 1
+ _ int64 = minInt64
+ _ int64 = maxInt64
+ _ int64 = maxInt64 /* ERROR "overflows" */ + 1
+ _ int64 = smallestFloat64 /* ERROR "truncated" */
+
+ _ = int64(minInt64 /* ERROR "cannot convert" */ - 1)
+ _ = int64(minInt64)
+ _ = int64(maxInt64)
+ _ = int64(maxInt64 /* ERROR "cannot convert" */ + 1)
+ _ = int64(smallestFloat64 /* ERROR "cannot convert" */)
+)
+
+const (
+ _ int = minInt /* ERROR "overflows" */ - 1
+ _ int = minInt
+ _ int = maxInt
+ _ int = maxInt /* ERROR "overflows" */ + 1
+ _ int = smallestFloat64 /* ERROR "truncated" */
+
+ _ = int(minInt /* ERROR "cannot convert" */ - 1)
+ _ = int(minInt)
+ _ = int(maxInt)
+ _ = int(maxInt /* ERROR "cannot convert" */ + 1)
+ _ = int(smallestFloat64 /* ERROR "cannot convert" */)
+)
+
+const (
+ _ uint8 = 0 /* ERROR "overflows" */ - 1
+ _ uint8 = 0
+ _ uint8 = maxUint8
+ _ uint8 = maxUint8 /* ERROR "overflows" */ + 1
+ _ uint8 = smallestFloat64 /* ERROR "truncated" */
+
+ _ = uint8(0 /* ERROR "cannot convert" */ - 1)
+ _ = uint8(0)
+ _ = uint8(maxUint8)
+ _ = uint8(maxUint8 /* ERROR "cannot convert" */ + 1)
+ _ = uint8(smallestFloat64 /* ERROR "cannot convert" */)
+)
+
+const (
+ _ uint16 = 0 /* ERROR "overflows" */ - 1
+ _ uint16 = 0
+ _ uint16 = maxUint16
+ _ uint16 = maxUint16 /* ERROR "overflows" */ + 1
+ _ uint16 = smallestFloat64 /* ERROR "truncated" */
+
+ _ = uint16(0 /* ERROR "cannot convert" */ - 1)
+ _ = uint16(0)
+ _ = uint16(maxUint16)
+ _ = uint16(maxUint16 /* ERROR "cannot convert" */ + 1)
+ _ = uint16(smallestFloat64 /* ERROR "cannot convert" */)
+)
+
+const (
+ _ uint32 = 0 /* ERROR "overflows" */ - 1
+ _ uint32 = 0
+ _ uint32 = maxUint32
+ _ uint32 = maxUint32 /* ERROR "overflows" */ + 1
+ _ uint32 = smallestFloat64 /* ERROR "truncated" */
+
+ _ = uint32(0 /* ERROR "cannot convert" */ - 1)
+ _ = uint32(0)
+ _ = uint32(maxUint32)
+ _ = uint32(maxUint32 /* ERROR "cannot convert" */ + 1)
+ _ = uint32(smallestFloat64 /* ERROR "cannot convert" */)
+)
+
+const (
+ _ uint64 = 0 /* ERROR "overflows" */ - 1
+ _ uint64 = 0
+ _ uint64 = maxUint64
+ _ uint64 = maxUint64 /* ERROR "overflows" */ + 1
+ _ uint64 = smallestFloat64 /* ERROR "truncated" */
+
+ _ = uint64(0 /* ERROR "cannot convert" */ - 1)
+ _ = uint64(0)
+ _ = uint64(maxUint64)
+ _ = uint64(maxUint64 /* ERROR "cannot convert" */ + 1)
+ _ = uint64(smallestFloat64 /* ERROR "cannot convert" */)
+)
+
+const (
+ _ uint = 0 /* ERROR "overflows" */ - 1
+ _ uint = 0
+ _ uint = maxUint
+ _ uint = maxUint /* ERROR "overflows" */ + 1
+ _ uint = smallestFloat64 /* ERROR "truncated" */
+
+ _ = uint(0 /* ERROR "cannot convert" */ - 1)
+ _ = uint(0)
+ _ = uint(maxUint)
+ _ = uint(maxUint /* ERROR "cannot convert" */ + 1)
+ _ = uint(smallestFloat64 /* ERROR "cannot convert" */)
+)
+
+const (
+ _ uintptr = 0 /* ERROR "overflows" */ - 1
+ _ uintptr = 0
+ _ uintptr = maxUintptr
+ _ uintptr = maxUintptr /* ERROR "overflows" */ + 1
+ _ uintptr = smallestFloat64 /* ERROR "truncated" */
+
+ _ = uintptr(0 /* ERROR "cannot convert" */ - 1)
+ _ = uintptr(0)
+ _ = uintptr(maxUintptr)
+ _ = uintptr(maxUintptr /* ERROR "cannot convert" */ + 1)
+ _ = uintptr(smallestFloat64 /* ERROR "cannot convert" */)
+)
+
+const (
+ _ float32 = minInt64
+ _ float64 = minInt64
+ _ complex64 = minInt64
+ _ complex128 = minInt64
+
+ _ = float32(minInt64)
+ _ = float64(minInt64)
+ _ = complex64(minInt64)
+ _ = complex128(minInt64)
+)
+
+const (
+ _ float32 = maxUint64
+ _ float64 = maxUint64
+ _ complex64 = maxUint64
+ _ complex128 = maxUint64
+
+ _ = float32(maxUint64)
+ _ = float64(maxUint64)
+ _ = complex64(maxUint64)
+ _ = complex128(maxUint64)
+)
+
+// TODO(gri) find smaller deltas below
+
+const delta32 = maxFloat32/(1 << 23)
+
+const (
+ _ float32 = - /* ERROR "overflow" */ (maxFloat32 + delta32)
+ _ float32 = -maxFloat32
+ _ float32 = maxFloat32
+ _ float32 = maxFloat32 /* ERROR "overflow" */ + delta32
+
+ _ = float32(- /* ERROR "cannot convert" */ (maxFloat32 + delta32))
+ _ = float32(-maxFloat32)
+ _ = float32(maxFloat32)
+ _ = float32(maxFloat32 /* ERROR "cannot convert" */ + delta32)
+
+ _ = assert(float32(smallestFloat32) == smallestFloat32)
+ _ = assert(float32(smallestFloat32/2) == 0)
+ _ = assert(float32(smallestFloat64) == 0)
+ _ = assert(float32(smallestFloat64/2) == 0)
+)
+
+const delta64 = maxFloat64/(1 << 52)
+
+const (
+ _ float64 = - /* ERROR "overflow" */ (maxFloat64 + delta64)
+ _ float64 = -maxFloat64
+ _ float64 = maxFloat64
+ _ float64 = maxFloat64 /* ERROR "overflow" */ + delta64
+
+ _ = float64(- /* ERROR "cannot convert" */ (maxFloat64 + delta64))
+ _ = float64(-maxFloat64)
+ _ = float64(maxFloat64)
+ _ = float64(maxFloat64 /* ERROR "cannot convert" */ + delta64)
+
+ _ = assert(float64(smallestFloat32) == smallestFloat32)
+ _ = assert(float64(smallestFloat32/2) == smallestFloat32/2)
+ _ = assert(float64(smallestFloat64) == smallestFloat64)
+ _ = assert(float64(smallestFloat64/2) == 0)
+)
+
+const (
+ _ complex64 = - /* ERROR "overflow" */ (maxFloat32 + delta32)
+ _ complex64 = -maxFloat32
+ _ complex64 = maxFloat32
+ _ complex64 = maxFloat32 /* ERROR "overflow" */ + delta32
+
+ _ = complex64(- /* ERROR "cannot convert" */ (maxFloat32 + delta32))
+ _ = complex64(-maxFloat32)
+ _ = complex64(maxFloat32)
+ _ = complex64(maxFloat32 /* ERROR "cannot convert" */ + delta32)
+)
+
+const (
+ _ complex128 = - /* ERROR "overflow" */ (maxFloat64 + delta64)
+ _ complex128 = -maxFloat64
+ _ complex128 = maxFloat64
+ _ complex128 = maxFloat64 /* ERROR "overflow" */ + delta64
+
+ _ = complex128(- /* ERROR "cannot convert" */ (maxFloat64 + delta64))
+ _ = complex128(-maxFloat64)
+ _ = complex128(maxFloat64)
+ _ = complex128(maxFloat64 /* ERROR "cannot convert" */ + delta64)
+)
+
+// Initialization of typed constant and conversion are the same:
+const (
+ f32 = 1 + smallestFloat32
+ x32 float32 = f32
+ y32 = float32(f32)
+ _ = assert(x32 - y32 == 0)
+)
+
+const (
+ f64 = 1 + smallestFloat64
+ x64 float64 = f64
+ y64 = float64(f64)
+ _ = assert(x64 - y64 == 0)
+)
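
A compact, standalone illustration of the rule exercised just above (initializing a typed constant and converting the same constant behave identically, including rounding); the constant tiny is invented for the example:

    package main

    import "fmt"

    // tiny is exact as an untyped constant but rounds away in float32,
    // whose mantissa has only 24 bits (2^-30 is below half an ulp of 1).
    const tiny = 1.0 + 1.0/(1<<30)

    const (
        a float32 = tiny          // typed constant initialization
        b         = float32(tiny) // conversion of the same constant
    )

    func main() {
        fmt.Println(a == b, a == 1) // true true: both forms round the same way
        // Values that cannot be represented are rejected in both forms as well;
        // e.g. neither `const _ int8 = 128` nor a constant `int8(128)` compiles.
    }
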
diff --git a/llgo/third_party/go.tools/go/types/testdata/constdecl.src b/llgo/third_party/go.tools/go/types/testdata/constdecl.src
new file mode 100644
index 0000000000000000000000000000000000000000..8577cb92c345490ed5f667b71c2a81c7358dcd10
--- /dev/null
+++ b/llgo/third_party/go.tools/go/types/testdata/constdecl.src
@@ -0,0 +1,94 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package constdecl
+
+import "math"
+
+var v int
+
+// Const decls must be initialized by constants.
+const _ = v /* ERROR "not constant" */
+const _ = math /* ERROR "not constant" */ .Sin(0)
+const _ = int /* ERROR "not an expression" */
+
+func _() {
+ const _ = v /* ERROR "not constant" */
+ const _ = math /* ERROR "not constant" */ .Sin(0)
+ const _ = int /* ERROR "not an expression" */
+}
+
+// Identifier and expression arity must match.
+const _ /* ERROR "missing init expr for _" */
+const _ = 1, 2 /* ERROR "extra init expr 2" */
+
+const _ /* ERROR "missing init expr for _" */ int
+const _ int = 1, 2 /* ERROR "extra init expr 2" */
+
+const (
+ _ /* ERROR "missing init expr for _" */
+ _ = 1, 2 /* ERROR "extra init expr 2" */
+
+ _ /* ERROR "missing init expr for _" */ int
+ _ int = 1, 2 /* ERROR "extra init expr 2" */
+)
+
+const (
+ _ = 1
+ _
+ _, _ /* ERROR "missing init expr for _" */
+ _
+)
+
+const (
+ _, _ = 1, 2
+ _, _
+ _ /* ERROR "extra init expr at" */
+ _, _
+ _, _, _ /* ERROR "missing init expr for _" */
+ _, _
+)
+
+func _() {
+ const _ /* ERROR "missing init expr for _" */
+ const _ = 1, 2 /* ERROR "extra init expr 2" */
+
+ const _ /* ERROR "missing init expr for _" */ int
+ const _ int = 1, 2 /* ERROR "extra init expr 2" */
+
+ const (
+ _ /* ERROR "missing init expr for _" */
+ _ = 1, 2 /* ERROR "extra init expr 2" */
+
+ _ /* ERROR "missing init expr for _" */ int
+ _ int = 1, 2 /* ERROR "extra init expr 2" */
+ )
+
+ const (
+ _ = 1
+ _
+ _, _ /* ERROR "missing init expr for _" */
+ _
+ )
+
+ const (
+ _, _ = 1, 2
+ _, _
+ _ /* ERROR "extra init expr at" */
+ _, _
+ _, _, _ /* ERROR "missing init expr for _" */
+ _, _
+ )
+}
+
+// Test case for constant with invalid initialization.
+// Caused panic because the constant value was not set up (gri - 7/8/2014).
+func _() {
+ const (
+ x string = missing /* ERROR "undeclared name" */
+ y = x + ""
+ )
+}
+
+// TODO(gri) move extra tests from testdata/const0.src into here
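
The arity errors above follow from how grouped constant declarations work: a ConstSpec with no expression list repeats the previous list, and the number of identifiers must match that repeated list. A small valid counterpart, for illustration:

    package main

    import "fmt"

    const (
        x, y = iota, iota * 10 // two identifiers, two expressions
        a, b                   // omitted list repeats "iota, iota * 10" with iota == 1
        c, d                   // and again with iota == 2
        // A spec line with a single identifier here would not compile:
        // the repeated list still has two expressions.
    )

    func main() {
        fmt.Println(x, y, a, b, c, d) // 0 0 1 10 2 20
    }
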
diff --git a/llgo/third_party/go.tools/go/types/testdata/conversions.src b/llgo/third_party/go.tools/go/types/testdata/conversions.src
new file mode 100644
index 0000000000000000000000000000000000000000..4251424646ee5c4a734cbb1bf322835e85973487
--- /dev/null
+++ b/llgo/third_party/go.tools/go/types/testdata/conversions.src
@@ -0,0 +1,88 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// conversions
+
+package conversions
+
+import "unsafe"
+
+// argument count
+var (
+ _ = int() /* ERROR "missing argument" */
+ _ = int(1, 2 /* ERROR "too many arguments" */ )
+)
+
+// numeric constant conversions are in const1.src.
+
+func string_conversions() {
+ const A = string(65)
+ assert(A == "A")
+ const E = string(-1)
+ assert(E == "\uFFFD")
+ assert(E == string(1234567890))
+
+ type myint int
+ assert(A == string(myint(65)))
+
+ type mystring string
+ const _ mystring = mystring("foo")
+
+ const _ = string(true /* ERROR "cannot convert" */ )
+ const _ = string(1.2 /* ERROR "cannot convert" */ )
+ const _ = string(nil /* ERROR "cannot convert" */ )
+}
+
+func interface_conversions() {
+ type E interface{}
+
+ type I1 interface{
+ m1()
+ }
+
+ type I2 interface{
+ m1()
+ m2(x int)
+ }
+
+ type I3 interface{
+ m1()
+ m2() int
+ }
+
+ var e E
+ var i1 I1
+ var i2 I2
+ var i3 I3
+
+ _ = E(0)
+ _ = E(nil)
+ _ = E(e)
+ _ = E(i1)
+ _ = E(i2)
+
+ _ = I1(0 /* ERROR "cannot convert" */ )
+ _ = I1(nil)
+ _ = I1(i1)
+ _ = I1(e /* ERROR "cannot convert" */ )
+ _ = I1(i2)
+
+ _ = I2(nil)
+ _ = I2(i1 /* ERROR "cannot convert" */ )
+ _ = I2(i2)
+ _ = I2(i3 /* ERROR "cannot convert" */ )
+
+ _ = I3(nil)
+ _ = I3(i1 /* ERROR "cannot convert" */ )
+ _ = I3(i2 /* ERROR "cannot convert" */ )
+ _ = I3(i3)
+
+ // TODO(gri) add more tests, improve error message
+}
+
+func issue6326() {
+ type T unsafe.Pointer
+ var x T
+ _ = uintptr(x) // see issue 6326
+}
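
The interface conversion cases above reduce to one rule: T(x) for an interface type T is valid only when the static type of x implements T's method set. A small self-contained illustration (Reader, ReadCloser and file are invented for the example):

    package main

    type Reader interface{ Read() string }

    type ReadCloser interface {
        Read() string
        Close() error
    }

    type file struct{}

    func (file) Read() string { return "" }
    func (file) Close() error { return nil }

    func main() {
        var f file
        _ = Reader(f)     // ok: file implements Reader
        _ = ReadCloser(f) // ok: file implements ReadCloser
        var r Reader = f
        _ = r
        // _ = ReadCloser(r) // would not compile: Reader's method set lacks Close
    }
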
diff --git a/llgo/third_party/go.tools/go/types/testdata/cycles.src b/llgo/third_party/go.tools/go/types/testdata/cycles.src
new file mode 100644
index 0000000000000000000000000000000000000000..621d83c9450a03f7823a657e0b01a96a0dc098b3
--- /dev/null
+++ b/llgo/third_party/go.tools/go/types/testdata/cycles.src
@@ -0,0 +1,143 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cycles
+
+type (
+ T0 int
+ T1 /* ERROR cycle */ T1
+ T2 *T2
+
+ T3 /* ERROR cycle */ T4
+ T4 T5
+ T5 T3
+
+ T6 T7
+ T7 *T8
+ T8 T6
+
+ // arrays
+ A0 /* ERROR cycle */ [10]A0
+ A1 [10]*A1
+
+ A2 /* ERROR cycle */ [10]A3
+ A3 [10]A4
+ A4 A2
+
+ A5 [10]A6
+ A6 *A5
+
+ // slices
+ L0 []L0
+
+ // structs
+ S0 /* ERROR cycle */ struct{ _ S0 }
+ S1 /* ERROR cycle */ struct{ S1 }
+ S2 struct{ _ *S2 }
+ S3 struct{ *S3 }
+
+ S4 /* ERROR cycle */ struct{ S5 }
+ S5 struct{ S6 }
+ S6 S4
+
+ // pointers
+ P0 *P0
+
+ // functions
+ F0 func(F0)
+ F1 func() F1
+ F2 func(F2) F2
+
+ // interfaces
+ I0 /* ERROR cycle */ interface{ I0 }
+
+ I1 interface{ I2 }
+ I2 interface{ I3 }
+ I3 /* ERROR cycle */ interface{ I1 }
+
+ I4 interface{ f(I4) }
+
+ // testcase for issue 5090
+ I5 interface{ f(I6) }
+ I6 interface{ I5 }
+
+ // maps
+ M0 map[M0 /* ERROR invalid map key */ ]M0
+
+ // channels
+ C0 chan C0
+)
+
+func _() {
+ type (
+ t1 /* ERROR cycle */ t1
+ t2 *t2
+
+ t3 t4 /* ERROR undeclared */
+ t4 t5 /* ERROR undeclared */
+ t5 t3
+
+ // arrays
+ a0 /* ERROR cycle */ [10]a0
+ a1 [10]*a1
+
+ // slices
+ l0 []l0
+
+ // structs
+ s0 /* ERROR cycle */ struct{ _ s0 }
+ s1 /* ERROR cycle */ struct{ s1 }
+ s2 struct{ _ *s2 }
+ s3 struct{ *s3 }
+
+ // pointers
+ p0 *p0
+
+ // functions
+ f0 func(f0)
+ f1 func() f1
+ f2 func(f2) f2
+
+ // interfaces
+ i0 /* ERROR cycle */ interface{ i0 }
+
+ // maps
+ m0 map[m0 /* ERROR invalid map key */ ]m0
+
+ // channels
+ c0 chan c0
+ )
+}
+
+// test cases for issue 6667
+
+type A [10]map[A /* ERROR invalid map key */ ]bool
+
+type S struct {
+ m map[S /* ERROR invalid map key */ ]bool
+}
+
+// test cases for issue 7236
+// (cycle detection must not be dependent on starting point of resolution)
+
+type (
+ P1 *T9
+ T9 /* ERROR cycle */ T9
+
+ T10 /* ERROR cycle */ T10
+ P2 *T10
+)
+
+func (T11) m() {}
+
+type T11 /* ERROR cycle */ struct{ T11 }
+
+type T12 /* ERROR cycle */ struct{ T12 }
+
+func (*T12) m() {}
+
+type (
+ P3 *T13
+ T13 /* ERROR cycle */ T13
+)
\ No newline at end of file
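
For orientation, the invalid cycles above are the ones in which a type refers back to itself without passing through an indirection (pointer, slice, map, channel, or function type). A minimal, generic sketch of such a check follows; typeDecl and resolve are invented names and this is not the go/types implementation:

    package main

    import "fmt"

    // typeDecl is a toy representation of "type Name Underlying" where the
    // underlying type either names another declared type or reaches it only
    // through an indirection such as a pointer.
    type typeDecl struct {
        name     string
        refersTo string // "" if the underlying type names no declared type
        indirect bool   // true if the reference goes through an indirection
    }

    // resolve reports a cycle if it reaches a type that is already on the
    // current resolution path without an intervening indirection.
    func resolve(decls map[string]typeDecl, name string, onPath map[string]bool) error {
        if onPath[name] {
            return fmt.Errorf("invalid recursive type %s", name)
        }
        d, ok := decls[name]
        if !ok || d.refersTo == "" || d.indirect {
            return nil // leaf, unknown, or cycle broken by an indirection
        }
        onPath[name] = true
        defer delete(onPath, name)
        return resolve(decls, d.refersTo, onPath)
    }

    func main() {
        decls := map[string]typeDecl{
            "T1": {name: "T1", refersTo: "T1"},                 // type T1 T1  -> cycle
            "T2": {name: "T2", refersTo: "T2", indirect: true}, // type T2 *T2 -> ok
        }
        fmt.Println(resolve(decls, "T1", map[string]bool{}))
        fmt.Println(resolve(decls, "T2", map[string]bool{}))
    }
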
diff --git a/llgo/third_party/go.tools/go/types/testdata/cycles1.src b/llgo/third_party/go.tools/go/types/testdata/cycles1.src
new file mode 100644
index 0000000000000000000000000000000000000000..ae2b38ebec21e36d5f2e5bdd360948e8cbba97ce
--- /dev/null
+++ b/llgo/third_party/go.tools/go/types/testdata/cycles1.src
@@ -0,0 +1,77 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type (
+ A interface {
+ a() interface {
+ ABC1
+ }
+ }
+ B interface {
+ b() interface {
+ ABC2
+ }
+ }
+ C interface {
+ c() interface {
+ ABC3
+ }
+ }
+
+ AB interface {
+ A
+ B
+ }
+ BC interface {
+ B
+ C
+ }
+
+ ABC1 interface {
+ A
+ B
+ C
+ }
+ ABC2 interface {
+ AB
+ C
+ }
+ ABC3 interface {
+ A
+ BC
+ }
+)
+
+var (
+ x1 ABC1
+ x2 ABC2
+ x3 ABC3
+)
+
+func _() {
+ // all types have the same method set
+ x1 = x2
+ x2 = x1
+
+ x1 = x3
+ x3 = x1
+
+ x2 = x3
+ x3 = x2
+
+ // all methods return the same type again
+ x1 = x1.a()
+ x1 = x1.b()
+ x1 = x1.c()
+
+ x2 = x2.a()
+ x2 = x2.b()
+ x2 = x2.c()
+
+ x3 = x3.a()
+ x3 = x3.b()
+ x3 = x3.c()
+}
diff --git a/llgo/third_party/go.tools/go/types/testdata/cycles2.src b/llgo/third_party/go.tools/go/types/testdata/cycles2.src
new file mode 100644
index 0000000000000000000000000000000000000000..345ab56ea690c65433965ee968488efe026ef72c
--- /dev/null
+++ b/llgo/third_party/go.tools/go/types/testdata/cycles2.src
@@ -0,0 +1,118 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+import "unsafe"
+
+// Test case for issue 5090
+
+type t interface {
+ f(u)
+}
+
+type u interface {
+ t
+}
+
+func _() {
+ var t t
+ var u u
+
+ t.f(t)
+ t.f(u)
+
+ u.f(t)
+ u.f(u)
+}
+
+
+// Test case for issue 6589.
+
+type A interface {
+ a() interface {
+ AB
+ }
+}
+
+type B interface {
+ a() interface {
+ AB
+ }
+}
+
+type AB interface {
+ a() interface {
+ A
+ B /* ERROR a redeclared */
+ }
+ b() interface {
+ A
+ B /* ERROR a redeclared */
+ }
+}
+
+var x AB
+var y interface {
+ A
+ B /* ERROR a redeclared */
+}
+var _ = x /* ERROR cannot compare */ == y
+
+
+// Test case for issue 6638.
+
+type T interface {
+ m() [T /* ERROR no value */ (nil).m()[0]]int
+}
+
+// Variations of this test case.
+
+type T1 interface {
+ m() [x1 /* ERROR no value */ .m()[0]]int
+}
+
+var x1 T1
+
+type T2 interface {
+ m() [len(x2 /* ERROR no value */ .m())]int
+}
+
+var x2 T2
+
+type T3 interface {
+ m() [unsafe.Sizeof(x3.m)]int
+}
+
+var x3 T3
+
+// The test case below should also report an error for
+// the cast inside the T4 interface (like it does for the
+// variable initialization). The reason why it does not is
+// that inside T4, the method x4.m depends on T4 which is not
+// fully set up yet. The x4.m method happens to have an empty
+// signature which is why the cast is permitted.
+// TODO(gri) Consider marking methods as incomplete and provide
+// a better error message in that case.
+
+type T4 interface {
+ m() [unsafe.Sizeof(cast4(x4.m))]int
+}
+
+var x4 T4
+var _ = cast4(x4 /* ERROR cannot convert */.m)
+
+type cast4 func()
+
+// This test is symmetric to the T4 case: Here the cast is
+// "correct", but it doesn't work inside the T5 interface.
+
+type T5 interface {
+ m() [unsafe.Sizeof(cast5(x5 /* ERROR cannot convert */ .m))]int
+}
+
+var x5 T5
+var _ = cast5(x5.m)
+
+type cast5 func() [0]int
diff --git a/llgo/third_party/go.tools/go/types/testdata/cycles3.src b/llgo/third_party/go.tools/go/types/testdata/cycles3.src
new file mode 100644
index 0000000000000000000000000000000000000000..3da4fb5761a662ae0aa5108586ccb5a71e897698
--- /dev/null
+++ b/llgo/third_party/go.tools/go/types/testdata/cycles3.src
@@ -0,0 +1,60 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+import "unsafe"
+
+var (
+ _ A = A(nil).a().b().c().d().e().f()
+ _ A = A(nil).b().c().d().e().f()
+ _ A = A(nil).c().d().e().f()
+ _ A = A(nil).d().e().f()
+ _ A = A(nil).e().f()
+ _ A = A(nil).f()
+ _ A = A(nil)
+)
+
+type (
+ A interface {
+ a() B
+ B
+ }
+
+ B interface {
+ b() C
+ C
+ }
+
+ C interface {
+ c() D
+ D
+ }
+
+ D interface {
+ d() E
+ E
+ }
+
+ E interface {
+ e() F
+ F
+ }
+
+ F interface {
+ f() A
+ }
+)
+
+type (
+ U interface {
+ V
+ }
+
+ V interface {
+ v() [unsafe.Sizeof(u)]int
+ }
+)
+
+var u U
diff --git a/llgo/third_party/go.tools/go/types/testdata/cycles4.src b/llgo/third_party/go.tools/go/types/testdata/cycles4.src
new file mode 100644
index 0000000000000000000000000000000000000000..445babca68bc3db73a1cc192f366480171fbc659
--- /dev/null
+++ b/llgo/third_party/go.tools/go/types/testdata/cycles4.src
@@ -0,0 +1,110 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+// Check that all methods of T are collected before
+// determining the result type of m (which embeds
+// all methods of T).
+
+type T interface {
+ m() interface {T}
+ E
+}
+
+var _ = T.m(nil).m().e()
+
+type E interface {
+ e() int
+}
+
+// Check that unresolved forward chains are followed
+// (see also comment in resolver.go, checker.typeDecl).
+
+var _ = C.m(nil).m().e()
+
+type A B
+
+type B interface {
+ m() interface{C}
+ E
+}
+
+type C A
+
+// Check that interface type comparison for identity
+// does not recur endlessly.
+
+type T1 interface {
+ m() interface{T1}
+}
+
+type T2 interface {
+ m() interface{T2}
+}
+
+func _(x T1, y T2) {
+ // Checking for assignability of interfaces must check
+ // if all methods of x are present in y, and that they
+ // have identical signatures. The signatures recur via
+ // the result type, which is an interface that embeds
+ // a single method m that refers to the very interface
+ // that contains it. This requires cycle detection in
+ // identity checks for interface types.
+ x = y
+}
+
+type T3 interface {
+ m() interface{T4}
+}
+
+type T4 interface {
+ m() interface{T3}
+}
+
+func _(x T1, y T3) {
+ x = y
+}
+
+// Check that interfaces are type-checked in order of
+// (embedded interface) dependencies (was issue 7158).
+
+var x1 T5 = T7(nil)
+
+type T5 interface {
+ T6
+}
+
+type T6 interface {
+ m() T7
+}
+type T7 interface {
+ T5
+}
+
+// Actual test case from issue 7158.
+
+func wrapNode() Node {
+ return wrapElement()
+}
+
+func wrapElement() Element {
+ return nil
+}
+
+type EventTarget interface {
+ AddEventListener(Event)
+}
+
+type Node interface {
+ EventTarget
+}
+
+type Element interface {
+ Node
+}
+
+type Event interface {
+ Target() Element
+}
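
The remark above about identity checks needing cycle detection can be illustrated with a toy structure: when comparing two possibly self-referential graphs, record the pairs already under comparison and treat a revisited pair as identical so the comparison terminates. node, pair and identical below are invented for the sketch and are not the go/types representation:

    package main

    import "fmt"

    // node is a toy self-referential "type": a name plus the type of its
    // single method's result (which may point back to the node itself).
    type node struct {
        name   string
        result *node
    }

    type pair struct{ a, b *node }

    // identical compares two graphs, using seen so that cyclic
    // references terminate instead of recursing forever.
    func identical(a, b *node, seen map[pair]bool) bool {
        if a == nil || b == nil {
            return a == b
        }
        p := pair{a, b}
        if seen[p] {
            return true // already comparing this pair: assume identical
        }
        seen[p] = true
        return a.name == b.name && identical(a.result, b.result, seen)
    }

    func main() {
        // T1's method returns T1; T2's method returns T2 (same shape, distinct graphs).
        t1 := &node{name: "m"}
        t1.result = t1
        t2 := &node{name: "m"}
        t2.result = t2
        fmt.Println(identical(t1, t2, map[pair]bool{})) // true, and it terminates
    }
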
diff --git a/llgo/third_party/go.tools/go/types/testdata/decls0.src b/llgo/third_party/go.tools/go/types/testdata/decls0.src
new file mode 100644
index 0000000000000000000000000000000000000000..f1df3ea7033db8fdcade60f2cc8f3b0d43bdbaea
--- /dev/null
+++ b/llgo/third_party/go.tools/go/types/testdata/decls0.src
@@ -0,0 +1,206 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// type declarations
+
+package decls0
+
+import "unsafe"
+
+const pi = 3.1415
+
+type (
+ N undeclared /* ERROR "undeclared" */
+ B bool
+ I int32
+ A [10]P
+ T struct {
+ x, y P
+ }
+ P *T
+ R (*R)
+ F func(A) I
+ Y interface {
+ f(A) I
+ }
+ S [](((P)))
+ M map[I]F
+ C chan<- I
+
+ // blank types must be typechecked
+ _ pi /* ERROR "not a type" */
+ _ struct{}
+ _ struct{ pi /* ERROR "not a type" */ }
+)
+
+
+// declarations of init
+const _, init /* ERROR "cannot declare init" */ , _ = 0, 1, 2
+type init /* ERROR "cannot declare init" */ struct{}
+var _, init /* ERROR "cannot declare init" */ int
+
+func init() {}
+func init /* ERROR "missing function body" */ ()
+
+func _() { const init = 0 }
+func _() { type init int }
+func _() { var init int; _ = init }
+
+// invalid array types
+type (
+ iA0 [... /* ERROR "invalid use of '...'" */ ]byte
+ iA1 [1 /* ERROR "invalid array length" */ <<100]int
+ iA2 [- /* ERROR "invalid array length" */ 1]complex128
+ iA3 ["foo" /* ERROR "must be integer" */ ]string
+)
+
+
+type (
+ p1 pi /* ERROR "no field or method foo" */ .foo
+ p2 unsafe.Pointer
+)
+
+
+type (
+ Pi pi /* ERROR "not a type" */
+
+ a /* ERROR "illegal cycle" */ a
+ a /* ERROR "redeclared" */ int
+
+ // where the cycle error appears depends on the
+ // order in which declarations are processed
+ // (which depends on the order in which a map
+ // is iterated through)
+ b /* ERROR "illegal cycle" */ c
+ c d
+ d e
+ e b
+
+ t *t
+
+ U V
+ V *W
+ W U
+
+ P1 *S2
+ P2 P1
+
+ S0 struct {
+ }
+ S1 struct {
+ a, b, c int
+ u, v, a /* ERROR "redeclared" */ float32
+ }
+ S2 struct {
+ S0 // anonymous field
+ S0 /* ERROR "redeclared" */ int
+ }
+ S3 struct {
+ x S2
+ }
+ S4/* ERROR "illegal cycle" */ struct {
+ S4
+ }
+ S5 /* ERROR "illegal cycle" */ struct {
+ S6
+ }
+ S6 struct {
+ field S7
+ }
+ S7 struct {
+ S5
+ }
+
+ L1 []L1
+ L2 []int
+
+ A1 [10.0]int
+ A2 /* ERROR "illegal cycle" */ [10]A2
+ A3 /* ERROR "illegal cycle" */ [10]struct {
+ x A4
+ }
+ A4 [10]A3
+
+ F1 func()
+ F2 func(x, y, z float32)
+ F3 func(x, y, x /* ERROR "redeclared" */ float32)
+ F4 func() (x, y, x /* ERROR "redeclared" */ float32)
+ F5 func(x int) (x /* ERROR "redeclared" */ float32)
+ F6 func(x ...int)
+
+ I1 interface{}
+ I2 interface {
+ m1()
+ }
+ I3 interface {
+ m1()
+ m1 /* ERROR "redeclared" */ ()
+ }
+ I4 interface {
+ m1(x, y, x /* ERROR "redeclared" */ float32)
+ m2() (x, y, x /* ERROR "redeclared" */ float32)
+ m3(x int) (x /* ERROR "redeclared" */ float32)
+ }
+ I5 interface {
+ m1(I5)
+ }
+ I6 interface {
+ S0 /* ERROR "not an interface" */
+ }
+ I7 interface {
+ I1
+ I1
+ }
+ I8 /* ERROR "illegal cycle" */ interface {
+ I8
+ }
+ I9 interface {
+ I10
+ }
+ I10 interface {
+ I11
+ }
+ I11 /* ERROR "illegal cycle" */ interface {
+ I9
+ }
+
+ C1 chan int
+ C2 <-chan int
+ C3 chan<- C3
+ C4 chan C5
+ C5 chan C6
+ C6 chan C4
+
+ M1 map[Last]string
+ M2 map[string]M2
+
+ Last int
+)
+
+// cycles in function/method declarations
+// (test cases for issue 5217 and variants)
+func f1(x f1 /* ERROR "not a type" */ ) {}
+func f2(x *f2 /* ERROR "not a type" */ ) {}
+func f3() (x f3 /* ERROR "not a type" */ ) { return }
+func f4() (x *f4 /* ERROR "not a type" */ ) { return }
+
+func (S0) m1(x S0 /* ERROR "field or method" */ .m1) {}
+func (S0) m2(x *S0 /* ERROR "field or method" */ .m2) {}
+func (S0) m3() (x S0 /* ERROR "field or method" */ .m3) { return }
+func (S0) m4() (x *S0 /* ERROR "field or method" */ .m4) { return }
+
+// interfaces may not have any blank methods
+type BlankI interface {
+ _ /* ERROR "invalid method name" */ ()
+ _ /* ERROR "invalid method name" */ (float32) int
+ m()
+}
+
+// non-interface types may have multiple blank methods
+type BlankT struct{}
+
+func (BlankT) _() {}
+func (BlankT) _(int) {}
+func (BlankT) _() int { return 0 }
+func (BlankT) _(int) int { return 0}
diff --git a/llgo/third_party/go.tools/go/types/testdata/decls1.src b/llgo/third_party/go.tools/go/types/testdata/decls1.src
new file mode 100644
index 0000000000000000000000000000000000000000..7855e461e21da5154cdba4bb81ad6473f59b26a5
--- /dev/null
+++ b/llgo/third_party/go.tools/go/types/testdata/decls1.src
@@ -0,0 +1,144 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// variable declarations
+
+package decls1
+
+import (
+ "math"
+)
+
+// Global variables without initialization
+var (
+ a, b bool
+ c byte
+ d uint8
+ r rune
+ i int
+ j, k, l int
+ x, y float32
+ xx, yy float64
+ u, v complex64
+ uu, vv complex128
+ s, t string
+ array []byte
+ iface interface{}
+
+ blank _ /* ERROR "cannot use _" */
+)
+
+// Global variables with initialization
+var (
+ s1 = i + j
+ s2 = i /* ERROR "mismatched types" */ + x
+ s3 = c + d
+ s4 = s + t
+ s5 = s /* ERROR "invalid operation" */ / t
+ s6 = array[t1]
+ s7 = array[x /* ERROR "integer" */]
+ s8 = &a
+ s10 = &42 /* ERROR "cannot take address" */
+ s11 = &v
+ s12 = -(u + *t11) / *&v
+ s13 = a /* ERROR "shifted operand" */ << d
+ s14 = i << j /* ERROR "must be unsigned" */
+ s18 = math.Pi * 10.0
+ s19 = s1 /* ERROR "cannot call" */ ()
+ s20 = f0 /* ERROR "no value" */ ()
+ s21 = f6(1, s1, i)
+ s22 = f6(1, s1, uu /* ERROR "cannot pass argument" */ )
+
+ t1 int = i + j
+ t2 int = i /* ERROR "mismatched types" */ + x
+ t3 int = c /* ERROR "cannot initialize" */ + d
+ t4 string = s + t
+ t5 string = s /* ERROR "invalid operation" */ / t
+ t6 byte = array[t1]
+ t7 byte = array[x /* ERROR "must be integer" */]
+ t8 *int = & /* ERROR "cannot initialize" */ a
+ t10 *int = &42 /* ERROR "cannot take address" */
+ t11 *complex64 = &v
+ t12 complex64 = -(u + *t11) / *&v
+ t13 int = a /* ERROR "shifted operand" */ << d
+ t14 int = i << j /* ERROR "must be unsigned" */
+ t15 math /* ERROR "not in selector" */
+ t16 math /* ERROR "not declared" */ .xxx
+ t17 math /* ERROR "not a type" */ .Pi
+ t18 float64 = math.Pi * 10.0
+ t19 int = t1 /* ERROR "cannot call" */ ()
+ t20 int = f0 /* ERROR "no value" */ ()
+ t21 int = a /* ERROR "cannot initialize" */
+)
+
+// Various more complex expressions
+var (
+ u1 = x /* ERROR "not an interface" */ .(int)
+ u2 = iface.([]int)
+ u3 = iface.(a /* ERROR "not a type" */ )
+ u4, ok = iface.(int)
+ u5, ok2, ok3 = iface /* ERROR "assignment count mismatch" */ .(int)
+)
+
+// Constant expression initializations
+var (
+ v1 = 1 /* ERROR "cannot convert" */ + "foo"
+ v2 = c + 255
+ v3 = c + 256 /* ERROR "overflows" */
+ v4 = r + 2147483647
+ v5 = r + 2147483648 /* ERROR "overflows" */
+ v6 = 42
+ v7 = v6 + 9223372036854775807
+ v8 = v6 + 9223372036854775808 /* ERROR "overflows" */
+ v9 = i + 1 << 10
+ v10 byte = 1024 /* ERROR "overflows" */
+ v11 = xx/yy*yy - xx
+ v12 = true && false
+ v13 = nil /* ERROR "use of untyped nil" */
+)
+
+// Multiple assignment expressions
+var (
+ m1a, m1b = 1, 2
+ m2a, m2b, m2c /* ERROR "missing init expr for m2c" */ = 1, 2
+ m3a, m3b = 1, 2, 3 /* ERROR "extra init expr 3" */
+)
+
+func _() {
+ var (
+ m1a, m1b = 1, 2
+ m2a, m2b, m2c /* ERROR "missing init expr for m2c" */ = 1, 2
+ m3a, m3b = 1, 2, 3 /* ERROR "extra init expr 3" */
+ )
+
+ _, _ = m1a, m1b
+ _, _, _ = m2a, m2b, m2c
+ _, _ = m3a, m3b
+}
+
+// Declaration of parameters and results
+func f0() {}
+func f1(a /* ERROR "not a type" */) {}
+func f2(a, b, c d /* ERROR "not a type" */) {}
+
+func f3() int { return 0 }
+func f4() a /* ERROR "not a type" */ { return 0 }
+func f5() (a, b, c d /* ERROR "not a type" */) { return }
+
+func f6(a, b, c int) complex128 { return 0 }
+
+// Declaration of receivers
+type T struct{}
+
+func (T) m0() {}
+func (*T) m1() {}
+func (x T) m2() {}
+func (x *T) m3() {}
+
+// Initialization functions
+func init() {}
+func /* ERROR "no arguments and no return values" */ init(int) {}
+func /* ERROR "no arguments and no return values" */ init() int { return 0 }
+func /* ERROR "no arguments and no return values" */ init(int) int { return 0 }
+func (T) init(int) int { return 0 }
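
A brief valid counterpart to the init cases above: package-level functions named init must take no arguments and return no results, several of them may be declared, and the name remains available for methods and local declarations:

    package main

    import "fmt"

    type T struct{}

    // Only package-level functions named init are restricted;
    // a method may freely be called init.
    func (T) init() int { return 1 }

    var order []string

    func init() { order = append(order, "first") }
    func init() { order = append(order, "second") } // multiple init functions are allowed

    func main() {
        fmt.Println(order)      // [first second]
        fmt.Println(T{}.init()) // 1
    }
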
diff --git a/llgo/third_party/go.tools/go/types/testdata/decls2a.src b/llgo/third_party/go.tools/go/types/testdata/decls2a.src
new file mode 100644
index 0000000000000000000000000000000000000000..bdbecd9dbb6358a38b9d535c461624562d66b51d
--- /dev/null
+++ b/llgo/third_party/go.tools/go/types/testdata/decls2a.src
@@ -0,0 +1,111 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// method declarations
+
+package decls2
+
+import "time"
+import "unsafe"
+
+// T1 declared before its methods.
+type T1 struct{
+ f int
+}
+
+func (T1) m() {}
+func (T1) m /* ERROR "already declared" */ () {}
+func (x *T1) f /* ERROR "field and method" */ () {}
+
+// Conflict between embedded field and method name,
+// with the embedded field being a basic type.
+type T1b struct {
+ int
+}
+
+func (T1b) int /* ERROR "field and method" */ () {}
+
+type T1c struct {
+ time.Time
+}
+
+func (T1c) Time /* ERROR "field and method" */ () int { return 0 }
+
+// Disabled for now: LookupFieldOrMethod will find Pointer even though
+// it's double-declared (it would cost extra in the common case to verify
+// this). But the MethodSet computation will not find it due to the name
+// collision caused by the double-declaration, leading to an internal
+// inconsistency while we are verifying one computation against the other.
+// var _ = T1c{}.Pointer
+
+// T2's method declared before the type.
+func (*T2) f /* ERROR "field and method" */ () {}
+
+type T2 struct {
+ f int
+}
+
+// Methods declared without a declared type.
+func (undeclared /* ERROR "undeclared" */) m() {}
+func (x *undeclared /* ERROR "undeclared" */) m() {}
+
+func (pi /* ERROR "not a type" */) m1() {}
+func (x pi /* ERROR "not a type" */) m2() {}
+func (x *pi /* ERROR "not a type" */ ) m3() {}
+
+// Blank types.
+type _ struct { m int }
+type _ struct { m int }
+
+func (_ /* ERROR "cannot use _" */) m() {}
+func m(_ /* ERROR "cannot use _" */) {}
+
+// Methods with receiver base type declared in another file.
+func (T3) m1() {}
+func (*T3) m2() {}
+func (x T3) m3() {}
+func (x *T3) f /* ERROR "field and method" */ () {}
+
+// Methods of non-struct type.
+type T4 func()
+
+func (self T4) m() func() { return self }
+
+// Methods associated with an interface.
+type T5 interface {
+ m() int
+}
+
+func (T5 /* ERROR "invalid receiver" */ ) m1() {}
+func (T5 /* ERROR "invalid receiver" */ ) m2() {}
+
+// Methods associated with a named pointer type.
+type ptr *int
+func (ptr /* ERROR "invalid receiver" */ ) _() {}
+func (* /* ERROR "invalid receiver" */ ptr) _() {}
+
+// Methods with zero or multiple receivers.
+func ( /* ERROR "missing receiver" */ ) _() {}
+func (T3, * /* ERROR "exactly one receiver" */ T3) _() {}
+func (T3, T3, T3 /* ERROR "exactly one receiver" */ ) _() {}
+func (a, b /* ERROR "exactly one receiver" */ T3) _() {}
+func (a, b, c /* ERROR "exactly one receiver" */ T3) _() {}
+
+// Methods associated with non-local or unnamed types.
+func (int /* ERROR "invalid receiver" */ ) m() {}
+func ([ /* ERROR "invalid receiver" */ ]int) m() {}
+func (time /* ERROR "invalid receiver" */ .Time) m() {}
+func (* /* ERROR "invalid receiver" */ time.Time) m() {}
+func (x /* ERROR "invalid receiver" */ interface{}) m() {}
+
+// Unsafe.Pointer is treated like a pointer when used as receiver type.
+type UP unsafe.Pointer
+func (UP /* ERROR "invalid" */ ) m1() {}
+func (* /* ERROR "invalid" */ UP) m2() {}
+
+// Double declarations across package files
+const c_double = 0
+type t_double int
+var v_double int
+func f_double() {}
diff --git a/llgo/third_party/go.tools/go/types/testdata/decls2b.src b/llgo/third_party/go.tools/go/types/testdata/decls2b.src
new file mode 100644
index 0000000000000000000000000000000000000000..e7bc394762f8c5532490b09ad8924197f3afd734
--- /dev/null
+++ b/llgo/third_party/go.tools/go/types/testdata/decls2b.src
@@ -0,0 +1,65 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// method declarations
+
+package decls2
+
+import "io"
+
+const pi = 3.1415
+
+func (T1) m /* ERROR "already declared" */ () {}
+func (T2) m(io.Writer) {}
+
+type T3 struct {
+ f *T3
+}
+
+type T6 struct {
+ x int
+}
+
+func (t *T6) m1() int {
+ return t.x
+}
+
+func f() {
+ var t *T6
+ t.m1()
+}
+
+// Double declarations across package files
+const c_double /* ERROR "redeclared" */ = 0
+type t_double /* ERROR "redeclared" */ int
+var v_double /* ERROR "redeclared" */ int
+func f_double /* ERROR "redeclared" */ () {}
+
+// Blank methods need to be type-checked.
+// Verify by checking that errors are reported.
+func (T /* ERROR "undeclared" */ ) _() {}
+func (T1) _(undeclared /* ERROR "undeclared" */ ) {}
+func (T1) _() int { return "foo" /* ERROR "cannot convert" */ }
+
+// Methods with undeclared receiver type can still be checked.
+// Verify by checking that errors are reported.
+func (Foo /* ERROR "undeclared" */ ) m() {}
+func (Foo /* ERROR "undeclared" */ ) m(undeclared /* ERROR "undeclared" */ ) {}
+func (Foo /* ERROR "undeclared" */ ) m() int { return "foo" /* ERROR "cannot convert" */ }
+
+func (Foo /* ERROR "undeclared" */ ) _() {}
+func (Foo /* ERROR "undeclared" */ ) _(undeclared /* ERROR "undeclared" */ ) {}
+func (Foo /* ERROR "undeclared" */ ) _() int { return "foo" /* ERROR "cannot convert" */ }
+
+// Receiver declarations are regular parameter lists;
+// receiver types may use parentheses, and the list
+// may have a trailing comma.
+type T7 struct {}
+
+func (T7) m1() {}
+func ((T7)) m2() {}
+func ((*T7)) m3() {}
+func (x *(T7),) m4() {}
+func (x (*(T7)),) m5() {}
+func (x ((*((T7)))),) m6() {}
diff --git a/llgo/third_party/go.tools/go/types/testdata/decls3.src b/llgo/third_party/go.tools/go/types/testdata/decls3.src
new file mode 100644
index 0000000000000000000000000000000000000000..80d2bc8ff8e5a8afc8c3235a7f1c43f4207678bf
--- /dev/null
+++ b/llgo/third_party/go.tools/go/types/testdata/decls3.src
@@ -0,0 +1,309 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// embedded types
+
+package decls3
+
+import "unsafe"
+import "fmt"
+
+// fields with the same name at the same level cancel each other out
+
+func _() {
+ type (
+ T1 struct { X int }
+ T2 struct { X int }
+ T3 struct { T1; T2 } // X is embedded twice at the same level via T1->X, T2->X
+ )
+
+ var t T3
+ _ = t /* ERROR "ambiguous selector" */ .X
+}
+
+func _() {
+ type (
+ T1 struct { X int }
+ T2 struct { T1 }
+ T3 struct { T1 }
+ T4 struct { T2; T3 } // X is embedded twice at the same level via T2->T1->X, T3->T1->X
+ )
+
+ var t T4
+ _ = t /* ERROR "ambiguous selector" */ .X
+}
+
+func issue4355() {
+ type (
+ T1 struct {X int}
+ T2 struct {T1}
+ T3 struct {T2}
+ T4 struct {T2}
+ T5 struct {T3; T4} // X is embedded twice at the same level via T3->T2->T1->X, T4->T2->T1->X
+ )
+
+ var t T5
+ _ = t /* ERROR "ambiguous selector" */ .X
+}
+
+func _() {
+ type State int
+ type A struct{ State }
+ type B struct{ fmt.State }
+ type T struct{ A; B }
+
+ var t T
+ _ = t /* ERROR "ambiguous selector" */ .State
+}
+
+// Embedded fields can be predeclared types.
+
+func _() {
+ type T0 struct{
+ int
+ float32
+ f int
+ }
+ var x T0
+ _ = x.int
+ _ = x.float32
+ _ = x.f
+
+ type T1 struct{
+ T0
+ }
+ var y T1
+ _ = y.int
+ _ = y.float32
+ _ = y.f
+}
+
+// Restrictions on embedded field types.
+
+func _() {
+ type I1 interface{}
+ type I2 interface{}
+ type P1 *int
+ type P2 *int
+ type UP unsafe.Pointer
+
+ type T1 struct {
+ I1
+ * /* ERROR "cannot be a pointer to an interface" */ I2
+ * /* ERROR "cannot be a pointer to an interface" */ error
+ P1 /* ERROR "cannot be a pointer" */
+ * /* ERROR "cannot be a pointer" */ P2
+ }
+
+ // unsafe.Pointers are treated like regular pointers when embedded
+ type T2 struct {
+ unsafe /* ERROR "cannot be unsafe.Pointer" */ .Pointer
+ */* ERROR "cannot be unsafe.Pointer" */ unsafe.Pointer
+ UP /* ERROR "cannot be unsafe.Pointer" */
+ * /* ERROR "cannot be unsafe.Pointer" */ UP
+ }
+}
+
+// Named types that are pointers.
+
+type S struct{ x int }
+func (*S) m() {}
+type P *S
+
+func _() {
+ var s *S
+ _ = s.x
+ _ = s.m
+
+ var p P
+ _ = p.x
+ _ = p /* ERROR "no field or method" */ .m
+ _ = P /* ERROR "no field or method" */ .m
+}
+
+// Borrowed from the FieldByName test cases in reflect/all_test.go.
+
+type D1 struct {
+ d int
+}
+type D2 struct {
+ d int
+}
+
+type S0 struct {
+ A, B, C int
+ D1
+ D2
+}
+
+type S1 struct {
+ B int
+ S0
+}
+
+type S2 struct {
+ A int
+ *S1
+}
+
+type S1x struct {
+ S1
+}
+
+type S1y struct {
+ S1
+}
+
+type S3 struct {
+ S1x
+ S2
+ D, E int
+ *S1y
+}
+
+type S4 struct {
+ *S4
+ A int
+}
+
+// The X in S6 and S7 annihilate, but they also block the X in S8.S9.
+type S5 struct {
+ S6
+ S7
+ S8
+}
+
+type S6 struct {
+ X int
+}
+
+type S7 S6
+
+type S8 struct {
+ S9
+}
+
+type S9 struct {
+ X int
+ Y int
+}
+
+// The X in S11.S6 and S12.S6 annihilate, but they also block the X in S13.S8.S9.
+type S10 struct {
+ S11
+ S12
+ S13
+}
+
+type S11 struct {
+ S6
+}
+
+type S12 struct {
+ S6
+}
+
+type S13 struct {
+ S8
+}
+
+func _() {
+ _ = struct /* ERROR "no field or method" */ {}{}.Foo
+ _ = S0{}.A
+ _ = S0 /* ERROR "no field or method" */ {}.D
+ _ = S1{}.A
+ _ = S1{}.B
+ _ = S1{}.S0
+ _ = S1{}.C
+ _ = S2{}.A
+ _ = S2{}.S1
+ _ = S2{}.B
+ _ = S2{}.C
+ _ = S2 /* ERROR "no field or method" */ {}.D
+ _ = S3 /* ERROR "ambiguous selector" */ {}.S1
+ _ = S3{}.A
+ _ = S3 /* ERROR "ambiguous selector" */ {}.B
+ _ = S3{}.D
+ _ = S3{}.E
+ _ = S4{}.A
+ _ = S4 /* ERROR "no field or method" */ {}.B
+ _ = S5 /* ERROR "ambiguous selector" */ {}.X
+ _ = S5{}.Y
+ _ = S10 /* ERROR "ambiguous selector" */ {}.X
+ _ = S10{}.Y
+}
+
+// Borrowed from the FieldByName benchmark in reflect/all_test.go.
+
+type R0 struct {
+ *R1
+ *R2
+ *R3
+ *R4
+}
+
+type R1 struct {
+ *R5
+ *R6
+ *R7
+ *R8
+}
+
+type R2 R1
+type R3 R1
+type R4 R1
+
+type R5 struct {
+ *R9
+ *R10
+ *R11
+ *R12
+}
+
+type R6 R5
+type R7 R5
+type R8 R5
+
+type R9 struct {
+ *R13
+ *R14
+ *R15
+ *R16
+}
+
+type R10 R9
+type R11 R9
+type R12 R9
+
+type R13 struct {
+ *R17
+ *R18
+ *R19
+ *R20
+}
+
+type R14 R13
+type R15 R13
+type R16 R13
+
+type R17 struct {
+ *R21
+ *R22
+ *R23
+ *R24
+}
+
+type R18 R17
+type R19 R17
+type R20 R17
+
+type R21 struct {
+ X int
+}
+
+type R22 R21
+type R23 R21
+type R24 R21
+
+var _ = R0 /* ERROR "ambiguous selector" */ {}.X
\ No newline at end of file
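
All the ambiguity errors above follow the same lookup rule: embedded fields are searched breadth-first, the shallowest depth at which the name appears wins, and two or more hits at that depth cancel each other and also hide anything deeper. A compact generic sketch of that rule (structType and lookup are invented, not the go/types machinery):

    package main

    import "fmt"

    // structType is a toy struct: named fields plus embedded structs.
    type structType struct {
        fields   []string
        embedded []*structType
    }

    // lookup returns the embedding depth at which name is found exactly once,
    // or an error if it is absent or ambiguous at its shallowest depth.
    func lookup(s *structType, name string) (int, error) {
        level := []*structType{s}
        for depth := 0; len(level) > 0; depth++ {
            matches := 0
            var next []*structType
            for _, t := range level {
                for _, f := range t.fields {
                    if f == name {
                        matches++
                    }
                }
                next = append(next, t.embedded...)
            }
            switch {
            case matches == 1:
                return depth, nil
            case matches > 1:
                return 0, fmt.Errorf("ambiguous selector %s", name)
            }
            level = next
        }
        return 0, fmt.Errorf("no field %s", name)
    }

    func main() {
        // type T1 struct{ X int }; type T2 struct{ X int }; type T3 struct{ T1; T2 }
        t1 := &structType{fields: []string{"X"}}
        t2 := &structType{fields: []string{"X"}}
        t3 := &structType{embedded: []*structType{t1, t2}}
        fmt.Println(lookup(t3, "X")) // 0 ambiguous selector X
    }
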
diff --git a/llgo/third_party/go.tools/go/types/testdata/errors.src b/llgo/third_party/go.tools/go/types/testdata/errors.src
new file mode 100644
index 0000000000000000000000000000000000000000..45bd45a13a4d500dda55afb2896128423d7b8899
--- /dev/null
+++ b/llgo/third_party/go.tools/go/types/testdata/errors.src
@@ -0,0 +1,55 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package errors
+
+// Testing precise operand formatting in error messages
+// (matching messages are regular expressions, hence the \'s).
+func f(x int, m map[string]int) {
+ // no values
+ _ = f /* ERROR "f\(0, m\) \(no value\) used as value" */ (0, m)
+
+ // built-ins
+ _ = println /* ERROR "println \(built-in\) must be called" */
+
+ // types
+ _ = complex128 /* ERROR "complex128 \(type\) is not an expression" */
+
+ // constants
+ const c1 = 991
+ const c2 float32 = 0.5
+ 0 /* ERROR "0 \(untyped int constant\) is not used" */
+ c1 /* ERROR "c1 \(untyped int constant 991\) is not used" */
+ c2 /* ERROR "c2 \(constant 1/2 of type float32\) is not used" */
+ c1 /* ERROR "c1 \+ c2 \(constant 1983/2 of type float32\) is not used" */ + c2
+
+ // variables
+ x /* ERROR "x \(variable of type int\) is not used" */
+
+ // values
+ x /* ERROR "x != x \(untyped bool value\) is not used" */ != x
+ x /* ERROR "x \+ x \(value of type int\) is not used" */ + x
+
+ // value, ok's
+ const s = "foo"
+ m /* ERROR "m\[s\] \(map index expression of type int\) is not used" */ [s]
+}
+
+// Valid ERROR comments can have a variety of forms.
+func _() {
+ 0 /* ERROR "0 .* is not used" */
+ 0 /* ERROR 0 .* is not used */
+ 0 // ERROR "0 .* is not used"
+ 0 // ERROR 0 .* is not used
+}
+
+// Don't report spurious errors as a consequence of earlier errors.
+// Add more tests as needed.
+func _() {
+ if err := foo /* ERROR undeclared */ (); err != nil /* no error here */ {}
+}
+
+// Use unqualified names for package-local objects.
+type T struct{}
+var _ int = T /* ERROR value of type T */ {} // use T in error message rather than errors.T
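
The ERROR comments throughout these .src files are directives for the package's tests: the attached text is a regular expression that must match an error reported at the position of the token preceding the comment (errors.src itself notes the accepted quoted and unquoted forms). As a rough, simplified sketch of how such directives could be collected with the standard parser, not the actual harness code:

    package main

    import (
        "fmt"
        "go/parser"
        "go/token"
        "regexp"
        "strings"
    )

    // errRx matches both forms seen above: ERROR "rx" and ERROR rx.
    var errRx = regexp.MustCompile(`^ *ERROR *"?([^"]*)"?`)

    func main() {
        src := `package p
    var x int = "foo" /* ERROR "cannot convert" */
    `
        fset := token.NewFileSet()
        f, err := parser.ParseFile(fset, "p.go", src, parser.ParseComments)
        if err != nil {
            panic(err)
        }
        for _, cg := range f.Comments {
            for _, c := range cg.List {
                text := strings.TrimSuffix(strings.TrimPrefix(c.Text, "/*"), "*/")
                text = strings.TrimPrefix(text, "//")
                if m := errRx.FindStringSubmatch(text); m != nil {
                    fmt.Printf("%s: expect error matching %q\n", fset.Position(c.Pos()), m[1])
                }
            }
        }
    }
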
diff --git a/llgo/third_party/go.tools/go/types/testdata/expr0.src b/llgo/third_party/go.tools/go/types/testdata/expr0.src
new file mode 100644
index 0000000000000000000000000000000000000000..5afb5d738e466d6e9007a0bac7a13f2744a93aa2
--- /dev/null
+++ b/llgo/third_party/go.tools/go/types/testdata/expr0.src
@@ -0,0 +1,168 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// unary expressions
+
+package expr0
+
+type mybool bool
+
+var (
+ // bool
+ b0 = true
+ b1 bool = b0
+ b2 = !true
+ b3 = !b1
+ b4 bool = !true
+ b5 bool = !b4
+ b6 = +b0 /* ERROR "not defined" */
+ b7 = -b0 /* ERROR "not defined" */
+ b8 = ^b0 /* ERROR "not defined" */
+ b9 = *b0 /* ERROR "cannot indirect" */
+ b10 = &true /* ERROR "cannot take address" */
+ b11 = &b0
+ b12 = <-b0 /* ERROR "cannot receive" */
+ b13 = & & /* ERROR "cannot take address" */ b0
+
+ // int
+ i0 = 1
+ i1 int = i0
+ i2 = +1
+ i3 = +i0
+ i4 int = +1
+ i5 int = +i4
+ i6 = -1
+ i7 = -i0
+ i8 int = -1
+ i9 int = -i4
+ i10 = !i0 /* ERROR "not defined" */
+ i11 = ^1
+ i12 = ^i0
+ i13 int = ^1
+ i14 int = ^i4
+ i15 = *i0 /* ERROR "cannot indirect" */
+ i16 = &i0
+ i17 = *i16
+ i18 = <-i16 /* ERROR "cannot receive" */
+
+ // uint
+ u0 = uint(1)
+ u1 uint = u0
+ u2 = +1
+ u3 = +u0
+ u4 uint = +1
+ u5 uint = +u4
+ u6 = -1
+ u7 = -u0
+ u8 uint = - /* ERROR "overflows" */ 1
+ u9 uint = -u4
+ u10 = !u0 /* ERROR "not defined" */
+ u11 = ^1
+ u12 = ^i0
+ u13 uint = ^ /* ERROR "overflows" */ 1
+ u14 uint = ^u4
+ u15 = *u0 /* ERROR "cannot indirect" */
+ u16 = &u0
+ u17 = *u16
+ u18 = <-u16 /* ERROR "cannot receive" */
+ u19 = ^uint(0)
+
+ // float64
+ f0 = float64(1)
+ f1 float64 = f0
+ f2 = +1
+ f3 = +f0
+ f4 float64 = +1
+ f5 float64 = +f4
+ f6 = -1
+ f7 = -f0
+ f8 float64 = -1
+ f9 float64 = -f4
+ f10 = !f0 /* ERROR "not defined" */
+ f11 = ^1
+ f12 = ^i0
+ f13 float64 = ^1
+ f14 float64 = ^f4 /* ERROR "not defined" */
+ f15 = *f0 /* ERROR "cannot indirect" */
+ f16 = &f0
+ f17 = *u16
+ f18 = <-u16 /* ERROR "cannot receive" */
+
+ // complex128
+ c0 = complex128(1)
+ c1 complex128 = c0
+ c2 = +1
+ c3 = +c0
+ c4 complex128 = +1
+ c5 complex128 = +c4
+ c6 = -1
+ c7 = -c0
+ c8 complex128 = -1
+ c9 complex128 = -c4
+ c10 = !c0 /* ERROR "not defined" */
+ c11 = ^1
+ c12 = ^i0
+ c13 complex128 = ^1
+ c14 complex128 = ^c4 /* ERROR "not defined" */
+ c15 = *c0 /* ERROR "cannot indirect" */
+ c16 = &c0
+ c17 = *u16
+ c18 = <-u16 /* ERROR "cannot receive" */
+
+ // string
+ s0 = "foo"
+ s1 = +"foo" /* ERROR "not defined" */
+ s2 = -s0 /* ERROR "not defined" */
+ s3 = !s0 /* ERROR "not defined" */
+ s4 = ^s0 /* ERROR "not defined" */
+ s5 = *s4
+ s6 = &s4
+ s7 = *s6
+ s8 = <-s7
+
+ // channel
+ ch chan int
+ rc <-chan float64
+ sc chan <- string
+ ch0 = +ch /* ERROR "not defined" */
+ ch1 = -ch /* ERROR "not defined" */
+ ch2 = !ch /* ERROR "not defined" */
+ ch3 = ^ch /* ERROR "not defined" */
+ ch4 = *ch /* ERROR "cannot indirect" */
+ ch5 = &ch
+ ch6 = *ch5
+ ch7 = <-ch
+ ch8 = <-rc
+ ch9 = <-sc /* ERROR "cannot receive" */
+ ch10, ok = <-ch
+ // ok is of type bool
+ ch11, myok = <-ch
+ _ mybool = myok /* ERROR "cannot initialize" */
+)
+
+// address of composite literals
+type T struct{x, y int}
+
+func f() T { return T{} }
+
+var (
+ _ = &T{1, 2}
+ _ = &[...]int{}
+ _ = &[]int{}
+ _ = &[]int{}
+ _ = &map[string]T{}
+ _ = &(T{1, 2})
+ _ = &((((T{1, 2}))))
+ _ = &f /* ERROR "cannot take address" */ ()
+)
+
+// recursive pointer types
+type P *P
+
+var (
+ p1 P = new(P)
+ p2 P = *p1
+ p3 P = &p2
+)
+
diff --git a/llgo/third_party/go.tools/go/types/testdata/expr1.src b/llgo/third_party/go.tools/go/types/testdata/expr1.src
new file mode 100644
index 0000000000000000000000000000000000000000..8ef0aed6d2ea16ed8d811338a04c1f68fab08751
--- /dev/null
+++ b/llgo/third_party/go.tools/go/types/testdata/expr1.src
@@ -0,0 +1,7 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// binary expressions
+
+package expr1
diff --git a/llgo/third_party/go.tools/go/types/testdata/expr2.src b/llgo/third_party/go.tools/go/types/testdata/expr2.src
new file mode 100644
index 0000000000000000000000000000000000000000..31dc5f021c029f47654cdb7ad3cd777b829a7f80
--- /dev/null
+++ b/llgo/third_party/go.tools/go/types/testdata/expr2.src
@@ -0,0 +1,247 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// comparisons
+
+package expr2
+
+func _bool() {
+ const t = true == true
+ const f = true == false
+ _ = t /* ERROR "cannot compare" */ < f
+ _ = 0 /* ERROR "cannot convert" */ == t
+ var b bool
+ var x, y float32
+ b = x < y
+ _ = b
+ _ = struct{b bool}{x < y}
+}
+
+// corner cases
+var (
+ v0 = nil /* ERROR "cannot compare" */ == nil
+)
+
+func arrays() {
+ // basics
+ var a, b [10]int
+ _ = a == b
+ _ = a != b
+ _ = a /* ERROR < not defined */ < b
+ _ = a == nil /* ERROR cannot convert */
+
+ type C [10]int
+ var c C
+ _ = a == c
+
+ type D [10]int
+ var d D
+ _ = c /* ERROR mismatched types */ == d
+
+ var e [10]func() int
+ _ = e /* ERROR == not defined */ == e
+}
+
+func structs() {
+ // basics
+ var s, t struct {
+ x int
+ a [10]float32
+ _ bool
+ }
+ _ = s == t
+ _ = s != t
+ _ = s /* ERROR < not defined */ < t
+ _ = s == nil /* ERROR cannot convert */
+
+ type S struct {
+ x int
+ a [10]float32
+ _ bool
+ }
+ type T struct {
+ x int
+ a [10]float32
+ _ bool
+ }
+ var ss S
+ var tt T
+ _ = s == ss
+ _ = ss /* ERROR mismatched types */ == tt
+
+ var u struct {
+ x int
+ a [10]map[string]int
+ }
+ _ = u /* ERROR cannot compare */ == u
+}
+
+func pointers() {
+ // nil
+ _ = nil /* ERROR == not defined */ == nil
+ _ = nil /* ERROR != not defined */ != nil
+ _ = nil /* ERROR < not defined */ < nil
+ _ = nil /* ERROR <= not defined */ <= nil
+ _ = nil /* ERROR > not defined */ > nil
+ _ = nil /* ERROR >= not defined */ >= nil
+
+ // basics
+ var p, q *int
+ _ = p == q
+ _ = p != q
+
+ _ = p == nil
+ _ = p != nil
+ _ = nil == q
+ _ = nil != q
+
+ _ = p /* ERROR < not defined */ < q
+ _ = p /* ERROR <= not defined */ <= q
+ _ = p /* ERROR > not defined */ > q
+ _ = p /* ERROR >= not defined */ >= q
+
+ // various element types
+ type (
+ S1 struct{}
+ S2 struct{}
+ P1 *S1
+ P2 *S2
+ )
+ var (
+ ps1 *S1
+ ps2 *S2
+ p1 P1
+ p2 P2
+ )
+ _ = ps1 == ps1
+ _ = ps1 /* ERROR mismatched types */ == ps2
+ _ = ps2 /* ERROR mismatched types */ == ps1
+
+ _ = p1 == p1
+ _ = p1 /* ERROR mismatched types */ == p2
+
+ _ = p1 == ps1
+}
+
+func channels() {
+ // basics
+ var c, d chan int
+ _ = c == d
+ _ = c != d
+ _ = c == nil
+ _ = c /* ERROR < not defined */ < d
+
+ // various element types (named types)
+ type (
+ C1 chan int
+ C1r <-chan int
+ C1s chan<- int
+ C2 chan float32
+ )
+ var (
+ c1 C1
+ c1r C1r
+ c1s C1s
+ c1a chan int
+ c2 C2
+ )
+ _ = c1 == c1
+ _ = c1 /* ERROR mismatched types */ == c1r
+ _ = c1 /* ERROR mismatched types */ == c1s
+ _ = c1r /* ERROR mismatched types */ == c1s
+ _ = c1 == c1a
+ _ = c1a == c1
+ _ = c1 /* ERROR mismatched types */ == c2
+ _ = c1a /* ERROR mismatched types */ == c2
+
+ // various element types (unnamed types)
+ var (
+ d1 chan int
+ d1r <-chan int
+ d1s chan<- int
+ d1a chan<- int
+ d2 chan float32
+ )
+ _ = d1 == d1
+ _ = d1 == d1r
+ _ = d1 == d1s
+ _ = d1r /* ERROR mismatched types */ == d1s
+ _ = d1 == d1a
+ _ = d1a == d1
+ _ = d1 /* ERROR mismatched types */ == d2
+ _ = d1a /* ERROR mismatched types */ == d2
+}
+
+// for interfaces test
+type S1 struct{}
+type S11 struct{}
+type S2 struct{}
+func (*S1) m() int
+func (*S11) m() int
+func (*S11) n()
+func (*S2) m() float32
+
+func interfaces() {
+ // basics
+ var i, j interface{ m() int }
+ _ = i == j
+ _ = i != j
+ _ = i == nil
+ _ = i /* ERROR < not defined */ < j
+
+ // various interfaces
+ var ii interface { m() int; n() }
+ var k interface { m() float32 }
+ _ = i == ii
+ _ = i /* ERROR mismatched types */ == k
+
+ // interfaces vs values
+ var s1 S1
+ var s11 S11
+ var s2 S2
+
+ _ = i == 0 /* ERROR cannot convert */
+ _ = i /* ERROR mismatched types */ == s1
+ _ = i == &s1
+ _ = i == &s11
+
+ _ = i /* ERROR mismatched types */ == s2
+ _ = i /* ERROR mismatched types */ == &s2
+}
+
+func slices() {
+ // basics
+ var s []int
+ _ = s == nil
+ _ = s != nil
+ _ = s /* ERROR < not defined */ < nil
+
+ // slices are not otherwise comparable
+ _ = s /* ERROR == not defined */ == s
+ _ = s /* ERROR < not defined */ < s
+}
+
+func maps() {
+ // basics
+ var m map[string]int
+ _ = m == nil
+ _ = m != nil
+ _ = m /* ERROR < not defined */ < nil
+
+ // maps are not otherwise comparable
+ _ = m /* ERROR == not defined */ == m
+ _ = m /* ERROR < not defined */ < m
+}
+
+func funcs() {
+ // basics
+ var f func(int) float32
+ _ = f == nil
+ _ = f != nil
+ _ = f /* ERROR < not defined */ < nil
+
+ // funcs are not otherwise comparable
+ _ = f /* ERROR == not defined */ == f
+ _ = f /* ERROR < not defined */ < f
+}
diff --git a/llgo/third_party/go.tools/go/types/testdata/expr3.src b/llgo/third_party/go.tools/go/types/testdata/expr3.src
new file mode 100644
index 0000000000000000000000000000000000000000..50ae7c4c1d49486598368d3cb79f60da25118c00
--- /dev/null
+++ b/llgo/third_party/go.tools/go/types/testdata/expr3.src
@@ -0,0 +1,483 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package expr3
+
+func indexes() {
+ _ = 1 /* ERROR "cannot index" */ [0]
+ _ = indexes /* ERROR "cannot index" */ [0]
+ _ = ( /* ERROR "cannot slice" */ 12 + 3)[1:2]
+
+ var a [10]int
+ _ = a[true /* ERROR "cannot convert" */ ]
+ _ = a["foo" /* ERROR "cannot convert" */ ]
+ _ = a[1.1 /* ERROR "truncated" */ ]
+ _ = a[1.0]
+ _ = a[- /* ERROR "negative" */ 1]
+ _ = a[- /* ERROR "negative" */ 1 :]
+ _ = a[: - /* ERROR "negative" */ 1]
+ _ = a[: /* ERROR "2nd index required" */ : /* ERROR "3rd index required" */ ]
+ _ = a[0: /* ERROR "2nd index required" */ : /* ERROR "3rd index required" */ ]
+ _ = a[0: /* ERROR "2nd index required" */ :10]
+ _ = a[:10:10]
+
+ var a0 int
+ a0 = a[0]
+ _ = a0
+ var a1 int32
+ a1 = a /* ERROR "cannot assign" */ [1]
+ _ = a1
+
+ _ = a[9]
+ _ = a[10 /* ERROR "index .* out of bounds" */ ]
+ _ = a[1 /* ERROR "overflows" */ <<100]
+ _ = a[10:]
+ _ = a[:10]
+ _ = a[10:10]
+ _ = a[11 /* ERROR "index .* out of bounds" */ :]
+ _ = a[: 11 /* ERROR "index .* out of bounds" */ ]
+ _ = a[: 1 /* ERROR "overflows" */ <<100]
+ _ = a[:10:10]
+ _ = a[:11 /* ERROR "index .* out of bounds" */ :10]
+ _ = a[:10:11 /* ERROR "index .* out of bounds" */ ]
+ _ = a[10:0:10] /* ERROR "invalid slice indices" */
+ _ = a[0:10:0] /* ERROR "invalid slice indices" */
+ _ = a[10:0:0] /* ERROR "invalid slice indices" */
+ _ = &a /* ERROR "cannot take address" */ [:10]
+
+ pa := &a
+ _ = pa[9]
+ _ = pa[10 /* ERROR "index .* out of bounds" */ ]
+ _ = pa[1 /* ERROR "overflows" */ <<100]
+ _ = pa[10:]
+ _ = pa[:10]
+ _ = pa[10:10]
+ _ = pa[11 /* ERROR "index .* out of bounds" */ :]
+ _ = pa[: 11 /* ERROR "index .* out of bounds" */ ]
+ _ = pa[: 1 /* ERROR "overflows" */ <<100]
+ _ = pa[:10:10]
+ _ = pa[:11 /* ERROR "index .* out of bounds" */ :10]
+ _ = pa[:10:11 /* ERROR "index .* out of bounds" */ ]
+ _ = pa[10:0:10] /* ERROR "invalid slice indices" */
+ _ = pa[0:10:0] /* ERROR "invalid slice indices" */
+ _ = pa[10:0:0] /* ERROR "invalid slice indices" */
+ _ = &pa /* ERROR "cannot take address" */ [:10]
+
+ var b [0]int
+ _ = b[0 /* ERROR "index .* out of bounds" */ ]
+ _ = b[:]
+ _ = b[0:]
+ _ = b[:0]
+ _ = b[0:0]
+ _ = b[0:0:0]
+ _ = b[1 /* ERROR "index .* out of bounds" */ :0:0]
+
+ var s []int
+ _ = s[- /* ERROR "negative" */ 1]
+ _ = s[- /* ERROR "negative" */ 1 :]
+ _ = s[: - /* ERROR "negative" */ 1]
+ _ = s[0]
+ _ = s[1:2]
+ _ = s[2:1] /* ERROR "invalid slice indices" */
+ _ = s[2:]
+ _ = s[: 1 /* ERROR "overflows" */ <<100]
+ _ = s[1 /* ERROR "overflows" */ <<100 :]
+ _ = s[1 /* ERROR "overflows" */ <<100 : 1 /* ERROR "overflows" */ <<100]
+ _ = s[: /* ERROR "2nd index required" */ : /* ERROR "3rd index required" */ ]
+ _ = s[:10:10]
+ _ = s[10:0:10] /* ERROR "invalid slice indices" */
+ _ = s[0:10:0] /* ERROR "invalid slice indices" */
+ _ = s[10:0:0] /* ERROR "invalid slice indices" */
+ _ = &s /* ERROR "cannot take address" */ [:10]
+
+ var m map[string]int
+ _ = m[0 /* ERROR "cannot convert" */ ]
+ _ = m /* ERROR "cannot slice" */ ["foo" : "bar"]
+ _ = m["foo"]
+ // ok is of type bool
+ type mybool bool
+ var ok mybool
+ _, ok = m["bar"]
+ _ = ok
+
+
+ var t string
+ _ = t[- /* ERROR "negative" */ 1]
+ _ = t[- /* ERROR "negative" */ 1 :]
+ _ = t[: - /* ERROR "negative" */ 1]
+ _ = t /* ERROR "3-index slice of string" */ [1:2:3]
+ _ = "foo" /* ERROR "3-index slice of string" */ [1:2:3]
+ var t0 byte
+ t0 = t[0]
+ _ = t0
+ var t1 rune
+ t1 = t /* ERROR "cannot assign" */ [2]
+ _ = t1
+ _ = ("foo" + "bar")[5]
+ _ = ("foo" + "bar")[6 /* ERROR "index .* out of bounds" */ ]
+
+ const c = "foo"
+ _ = c[- /* ERROR "negative" */ 1]
+ _ = c[- /* ERROR "negative" */ 1 :]
+ _ = c[: - /* ERROR "negative" */ 1]
+ var c0 byte
+ c0 = c[0]
+ _ = c0
+ var c2 float32
+ c2 = c /* ERROR "cannot assign" */ [2]
+ _ = c[3 /* ERROR "index .* out of bounds" */ ]
+ _ = ""[0 /* ERROR "index .* out of bounds" */ ]
+ _ = c2
+
+ _ = s[1<<30] // no compile-time error here
+
+ // issue 4913
+ type mystring string
+ var ss string
+ var ms mystring
+ var i, j int
+ ss = "foo"[1:2]
+ ss = "foo"[i:j]
+ ms = "foo" /* ERROR "cannot assign" */ [1:2]
+ ms = "foo" /* ERROR "cannot assign" */ [i:j]
+ _, _ = ss, ms
+}
+
+type T struct {
+ x int
+ y func()
+}
+
+func (*T) m() {}
+
+func method_expressions() {
+ _ = T /* ERROR "no field or method" */ .a
+ _ = T /* ERROR "has no method" */ .x
+ _ = T /* ERROR "not in method set" */ .m
+ _ = (*T).m
+
+ var f func(*T) = T /* ERROR "not in method set" */ .m
+ var g func(*T) = (*T).m
+ _, _ = f, g
+
+ _ = T /* ERROR "has no method" */ .y
+ _ = ( /* ERROR "has no method" */ *T).y
+}
+
+func struct_literals() {
+ type T0 struct {
+ a, b, c int
+ }
+
+ type T1 struct {
+ T0
+ a, b int
+ u float64
+ s string
+ }
+
+ // keyed elements
+ _ = T1{}
+ _ = T1{a: 0, 1 /* ERROR "mixture of .* elements" */ }
+ _ = T1{aa /* ERROR "unknown field" */ : 0}
+ _ = T1{1 /* ERROR "invalid field name" */ : 0}
+ _ = T1{a: 0, s: "foo", u: 0, a /* ERROR "duplicate field" */: 10}
+ _ = T1{a: "foo" /* ERROR "cannot convert" */ }
+ _ = T1{c /* ERROR "unknown field" */ : 0}
+ _ = T1{T0: { /* ERROR "missing type" */ }}
+ _ = T1{T0: T0{}}
+ _ = T1{T0 /* ERROR "invalid field name" */ .a: 0}
+
+ // unkeyed elements
+ _ = T0{1, 2, 3}
+ _ = T0{1, b /* ERROR "mixture" */ : 2, 3}
+ _ = T0{1, 2} /* ERROR "too few values" */
+ _ = T0{1, 2, 3, 4 /* ERROR "too many values" */ }
+ _ = T0{1, "foo" /* ERROR "cannot convert" */, 3.4 /* ERROR "truncated" */}
+
+ // invalid type
+ type P *struct{
+ x int
+ }
+ _ = P /* ERROR "invalid composite literal type" */ {}
+}
+
+func array_literals() {
+ type A0 [0]int
+ _ = A0{}
+ _ = A0{0 /* ERROR "index .* out of bounds" */}
+ _ = A0{0 /* ERROR "index .* out of bounds" */ : 0}
+
+ type A1 [10]int
+ _ = A1{}
+ _ = A1{0, 1, 2}
+ _ = A1{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}
+ _ = A1{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 /* ERROR "index .* out of bounds" */ }
+ _ = A1{- /* ERROR "negative" */ 1: 0}
+ _ = A1{8: 8, 9}
+ _ = A1{8: 8, 9, 10 /* ERROR "index .* out of bounds" */ }
+ _ = A1{0, 1, 2, 0 /* ERROR "duplicate index" */ : 0, 3: 3, 4}
+ _ = A1{5: 5, 6, 7, 3: 3, 4}
+ _ = A1{5: 5, 6, 7, 3: 3, 4, 5 /* ERROR "duplicate index" */ }
+ _ = A1{10 /* ERROR "index .* out of bounds" */ : 10, 10 /* ERROR "index .* out of bounds" */ : 10}
+ _ = A1{5: 5, 6, 7, 3: 3, 1 /* ERROR "overflows" */ <<100: 4, 5 /* ERROR "duplicate index" */ }
+ _ = A1{5: 5, 6, 7, 4: 4, 1 /* ERROR "overflows" */ <<100: 4}
+ _ = A1{2.0}
+ _ = A1{2.1 /* ERROR "truncated" */ }
+ _ = A1{"foo" /* ERROR "cannot convert" */ }
+
+ // indices must be integer constants
+ i := 1
+ const f = 2.1
+ const s = "foo"
+ _ = A1{i /* ERROR "index i must be integer constant" */ : 0}
+ _ = A1{f /* ERROR "truncated" */ : 0}
+ _ = A1{s /* ERROR "cannot convert" */ : 0}
+
+ a0 := [...]int{}
+ assert(len(a0) == 0)
+
+ a1 := [...]int{0, 1, 2}
+ assert(len(a1) == 3)
+ var a13 [3]int
+ var a14 [4]int
+ a13 = a1
+ a14 = a1 /* ERROR "cannot assign" */
+ _, _ = a13, a14
+
+ a2 := [...]int{- /* ERROR "negative" */ 1: 0}
+ _ = a2
+
+ a3 := [...]int{0, 1, 2, 0 /* ERROR "duplicate index" */ : 0, 3: 3, 4}
+ assert(len(a3) == 5) // somewhat arbitrary
+
+ a4 := [...]complex128{0, 1, 2, 1<<10-2: -1i, 1i, 400: 10, 12, 14}
+ assert(len(a4) == 1024)
+
+ // from the spec
+ type Point struct { x, y float32 }
+ _ = [...]Point{Point{1.5, -3.5}, Point{0, 0}}
+ _ = [...]Point{{1.5, -3.5}, {0, 0}}
+ _ = [][]int{[]int{1, 2, 3}, []int{4, 5}}
+ _ = [][]int{{1, 2, 3}, {4, 5}}
+ _ = [...]*Point{&Point{1.5, -3.5}, &Point{0, 0}}
+ _ = [...]*Point{{1.5, -3.5}, {0, 0}}
+}
+
+func slice_literals() {
+ type S0 []int
+ _ = S0{}
+ _ = S0{0, 1, 2}
+ _ = S0{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}
+ _ = S0{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
+ _ = S0{- /* ERROR "negative" */ 1: 0}
+ _ = S0{8: 8, 9}
+ _ = S0{8: 8, 9, 10}
+ _ = S0{0, 1, 2, 0 /* ERROR "duplicate index" */ : 0, 3: 3, 4}
+ _ = S0{5: 5, 6, 7, 3: 3, 4}
+ _ = S0{5: 5, 6, 7, 3: 3, 4, 5 /* ERROR "duplicate index" */ }
+ _ = S0{10: 10, 10 /* ERROR "duplicate index" */ : 10}
+ _ = S0{5: 5, 6, 7, 3: 3, 1 /* ERROR "overflows" */ <<100: 4, 5 /* ERROR "duplicate index" */ }
+ _ = S0{5: 5, 6, 7, 4: 4, 1 /* ERROR "overflows" */ <<100: 4}
+ _ = S0{2.0}
+ _ = S0{2.1 /* ERROR "truncated" */ }
+ _ = S0{"foo" /* ERROR "cannot convert" */ }
+
+ // indices must be resolved correctly
+ const index1 = 1
+ _ = S0{index1: 1}
+ _ = S0{index2: 2}
+ _ = S0{index3 /* ERROR "undeclared name" */ : 3}
+
+ // indices must be integer constants
+ i := 1
+ const f = 2.1
+ const s = "foo"
+ _ = S0{i /* ERROR "index i must be integer constant" */ : 0}
+ _ = S0{f /* ERROR "truncated" */ : 0}
+ _ = S0{s /* ERROR "cannot convert" */ : 0}
+
+}
+
+const index2 int = 2
+
+type N int
+func (N) f() {}
+
+func map_literals() {
+ type M0 map[string]int
+ type M1 map[bool]int
+ type M2 map[*int]int
+
+ _ = M0{}
+ _ = M0{1 /* ERROR "missing key" */ }
+ _ = M0{1 /* ERROR "cannot convert" */ : 2}
+ _ = M0{"foo": "bar" /* ERROR "cannot convert" */ }
+ _ = M0{"foo": 1, "bar": 2, "foo" /* ERROR "duplicate key" */ : 3 }
+
+ _ = map[interface{}]int{2: 1, 2 /* ERROR "duplicate key" */ : 1}
+ _ = map[interface{}]int{int(2): 1, int16(2): 1}
+ _ = map[interface{}]int{int16(2): 1, int16 /* ERROR "duplicate key" */ (2): 1}
+
+ type S string
+
+ _ = map[interface{}]int{"a": 1, "a" /* ERROR "duplicate key" */ : 1}
+ _ = map[interface{}]int{"a": 1, S("a"): 1}
+ _ = map[interface{}]int{S("a"): 1, S /* ERROR "duplicate key" */ ("a"): 1}
+
+ type I interface {
+ f()
+ }
+
+ _ = map[I]int{N(0): 1, N(2): 1}
+ _ = map[I]int{N(2): 1, N /* ERROR "duplicate key" */ (2): 1}
+
+ // map keys must be resolved correctly
+ key1 := "foo"
+ _ = M0{key1: 1}
+ _ = M0{key2: 2}
+ _ = M0{key3 /* ERROR "undeclared name" */ : 2}
+
+ var value int
+ _ = M1{true: 1, false: 0}
+ _ = M2{nil: 0, &value: 1}
+}
+
+var key2 string = "bar"
+
+type I interface {
+ m()
+}
+
+type I2 interface {
+ m(int)
+}
+
+type T1 struct{}
+type T2 struct{}
+
+func (T2) m(int) {}
+
+type mybool bool
+
+func type_asserts() {
+ var x int
+ _ = x /* ERROR "not an interface" */ .(int)
+
+ var e interface{}
+ var ok bool
+ x, ok = e.(int)
+ _ = ok
+
+ // ok value is of type bool
+ var myok mybool
+ _, myok = e.(int)
+ _ = myok
+
+ var t I
+ _ = t /* ERROR "use of .* outside type switch" */ .(type)
+ _ = t /* ERROR "missing method m" */ .(T)
+ _ = t.(*T)
+ _ = t /* ERROR "missing method m" */ .(T1)
+ _ = t /* ERROR "wrong type for method m" */ .(T2)
+ _ = t /* STRICT "wrong type for method m" */ .(I2) // only an error in strict mode (issue 8561)
+
+ // e doesn't statically have an m, but may have one dynamically.
+ _ = e.(I2)
+}
+
+func f0() {}
+func f1(x int) {}
+func f2(u float32, s string) {}
+func fs(s []byte) {}
+func fv(x ...int) {}
+func fi(x ... interface{}) {}
+func (T) fm(x ...int)
+
+func g0() {}
+func g1() int { return 0}
+func g2() (u float32, s string) { return }
+func gs() []byte { return nil }
+
+func _calls() {
+ var x int
+ var y float32
+ var s []int
+
+ f0()
+ _ = f0 /* ERROR "used as value" */ ()
+ f0(g0 /* ERROR "too many arguments" */ )
+
+ f1(0)
+ f1(x)
+ f1(10.0)
+ f1() /* ERROR "too few arguments" */
+ f1(x, y /* ERROR "too many arguments" */ )
+ f1(s /* ERROR "cannot pass" */ )
+ f1(x ... /* ERROR "cannot use ..." */ )
+ f1(g0 /* ERROR "used as value" */ ())
+ f1(g1())
+ // f1(g2()) // TODO(gri) missing position in error message
+
+ f2() /* ERROR "too few arguments" */
+ f2(3.14) /* ERROR "too few arguments" */
+ f2(3.14, "foo")
+ f2(x /* ERROR "cannot pass" */ , "foo")
+ f2(g0 /* ERROR "used as value" */ ())
+ f2(g1 /* ERROR "cannot pass" */ ()) /* ERROR "too few arguments" */
+ f2(g2())
+
+ fs() /* ERROR "too few arguments" */
+ fs(g0 /* ERROR "used as value" */ ())
+ fs(g1 /* ERROR "cannot pass" */ ())
+ fs(g2 /* ERROR "cannot pass" */ /* ERROR "too many arguments" */ ())
+ fs(gs())
+
+ fv()
+ fv(1, 2.0, x)
+ fv(s /* ERROR "cannot pass" */ )
+ fv(s...)
+ fv(x /* ERROR "cannot use" */ ...)
+ fv(1, s /* ERROR "can only use ... with matching parameter" */ ...)
+ fv(gs /* ERROR "cannot pass" */ ())
+ fv(gs /* ERROR "cannot pass" */ ()...)
+
+ var t T
+ t.fm()
+ t.fm(1, 2.0, x)
+ t.fm(s /* ERROR "cannot pass" */ )
+ t.fm(g1())
+ t.fm(1, s /* ERROR "can only use ... with matching parameter" */ ...)
+ t.fm(gs /* ERROR "cannot pass" */ ())
+ t.fm(gs /* ERROR "cannot pass" */ ()...)
+
+ T.fm(t, )
+ T.fm(t, 1, 2.0, x)
+ T.fm(t, s /* ERROR "cannot pass" */ )
+ T.fm(t, g1())
+ T.fm(t, 1, s /* ERROR "can only use ... with matching parameter" */ ...)
+ T.fm(t, gs /* ERROR "cannot pass" */ ())
+ T.fm(t, gs /* ERROR "cannot pass" */ ()...)
+
+ var i interface{ fm(x ...int) } = t
+ i.fm()
+ i.fm(1, 2.0, x)
+ i.fm(s /* ERROR "cannot pass" */ )
+ i.fm(g1())
+ i.fm(1, s /* ERROR "can only use ... with matching parameter" */ ...)
+ i.fm(gs /* ERROR "cannot pass" */ ())
+ i.fm(gs /* ERROR "cannot pass" */ ()...)
+
+ fi()
+ fi(1, 2.0, x, 3.14, "foo")
+ fi(g2())
+ fi(0, g2)
+ fi(0, g2 /* ERROR "2-valued expression" */ ())
+}
+
+func issue6344() {
+ type T []interface{}
+ var x T
+ fi(x...) // ... applies also to named slices
+}
diff --git a/llgo/third_party/go.tools/go/types/testdata/gotos.src b/llgo/third_party/go.tools/go/types/testdata/gotos.src
new file mode 100644
index 0000000000000000000000000000000000000000..0c7ee44056521cf39bcb792d8b5b1df09c7e40c2
--- /dev/null
+++ b/llgo/third_party/go.tools/go/types/testdata/gotos.src
@@ -0,0 +1,560 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file is a modified copy of $GOROOT/test/goto.go.
+
+package gotos
+
+var (
+ i, n int
+ x []int
+ c chan int
+ m map[int]int
+ s string
+)
+
+// goto after declaration okay
+func _() {
+ x := 1
+ goto L
+L:
+ _ = x
+}
+
+// goto before declaration okay
+func _() {
+ goto L
+L:
+ x := 1
+ _ = x
+}
+
+// goto across declaration not okay
+func _() {
+ goto L /* ERROR "goto L jumps over variable declaration at line 36" */
+ x := 1
+ _ = x
+L:
+}
+
+// goto across declaration in inner scope okay
+func _() {
+ goto L
+ {
+ x := 1
+ _ = x
+ }
+L:
+}
+
+// goto across declaration after inner scope not okay
+func _() {
+ goto L /* ERROR "goto L jumps over variable declaration at line 58" */
+ {
+ x := 1
+ _ = x
+ }
+ x := 1
+ _ = x
+L:
+}
+
+// goto across declaration in reverse okay
+func _() {
+L:
+ x := 1
+ _ = x
+ goto L
+}
+
+func _() {
+L: L1:
+ x := 1
+ _ = x
+ goto L
+ goto L1
+}
+
+// error shows first offending variable
+func _() {
+ goto L /* ERROR "goto L jumps over variable declaration at line 84" */
+ x := 1
+ _ = x
+ y := 1
+ _ = y
+L:
+}
+
+// goto not okay even if code path is dead
+func _() {
+ goto L /* ERROR "goto L jumps over variable declaration" */
+ x := 1
+ _ = x
+ y := 1
+ _ = y
+ return
+L:
+}
+
+// goto into outer block okay
+func _() {
+ {
+ goto L
+ }
+L:
+}
+
+func _() {
+ {
+ goto L
+ goto L1
+ }
+L: L1:
+}
+
+// goto backward into outer block okay
+func _() {
+L:
+ {
+ goto L
+ }
+}
+
+func _() {
+L: L1:
+ {
+ goto L
+ goto L1
+ }
+}
+
+// goto into inner block not okay
+func _() {
+ goto L /* ERROR "goto L jumps into block" */
+ {
+ L:
+ }
+}
+
+func _() {
+ goto L /* ERROR "goto L jumps into block" */
+ goto L1 /* ERROR "goto L1 jumps into block" */
+ {
+ L: L1:
+ }
+}
+
+// goto backward into inner block still not okay
+func _() {
+ {
+ L:
+ }
+ goto L /* ERROR "goto L jumps into block" */
+}
+
+func _() {
+ {
+ L: L1:
+ }
+ goto L /* ERROR "goto L jumps into block" */
+ goto L1 /* ERROR "goto L1 jumps into block" */
+}
+
+// error shows first (outermost) offending block
+func _() {
+ goto L /* ERROR "goto L jumps into block" */
+ {
+ {
+ {
+ L:
+ }
+ }
+ }
+}
+
+// error prefers block diagnostic over declaration diagnostic
+func _() {
+ goto L /* ERROR "goto L jumps into block" */
+ x := 1
+ _ = x
+ {
+ L:
+ }
+}
+
+// many kinds of blocks, all invalid to jump into or among,
+// but valid to jump out of
+
+// if
+
+func _() {
+L:
+ if true {
+ goto L
+ }
+}
+
+func _() {
+L:
+ if true {
+ goto L
+ } else {
+ }
+}
+
+func _() {
+L:
+ if false {
+ } else {
+ goto L
+ }
+}
+
+func _() {
+ goto L /* ERROR "goto L jumps into block" */
+ if true {
+ L:
+ }
+}
+
+func _() {
+ goto L /* ERROR "goto L jumps into block" */
+ if true {
+ L:
+ } else {
+ }
+}
+
+func _() {
+ goto L /* ERROR "goto L jumps into block" */
+ if true {
+ } else {
+ L:
+ }
+}
+
+func _() {
+ if false {
+ L:
+ } else {
+ goto L /* ERROR "goto L jumps into block" */
+ }
+}
+
+func _() {
+ if true {
+ goto L /* ERROR "goto L jumps into block" */
+ } else {
+ L:
+ }
+}
+
+func _() {
+ if true {
+ goto L /* ERROR "goto L jumps into block" */
+ } else if false {
+ L:
+ }
+}
+
+func _() {
+ if true {
+ goto L /* ERROR "goto L jumps into block" */
+ } else if false {
+ L:
+ } else {
+ }
+}
+
+func _() {
+ if true {
+ goto L /* ERROR "goto L jumps into block" */
+ } else if false {
+ } else {
+ L:
+ }
+}
+
+func _() {
+ if true {
+ goto L /* ERROR "goto L jumps into block" */
+ } else {
+ L:
+ }
+}
+
+func _() {
+ if true {
+ L:
+ } else {
+ goto L /* ERROR "goto L jumps into block" */
+ }
+}
+
+// for
+
+func _() {
+ for {
+ goto L
+ }
+L:
+}
+
+func _() {
+ for {
+ goto L
+ L:
+ }
+}
+
+func _() {
+ for {
+ L:
+ }
+ goto L /* ERROR "goto L jumps into block" */
+}
+
+func _() {
+ for {
+ goto L
+ L1:
+ }
+L:
+ goto L1 /* ERROR "goto L1 jumps into block" */
+}
+
+func _() {
+ for i < n {
+ L:
+ }
+ goto L /* ERROR "goto L jumps into block" */
+}
+
+func _() {
+ for i = 0; i < n; i++ {
+ L:
+ }
+ goto L /* ERROR "goto L jumps into block" */
+}
+
+func _() {
+ for i = range x {
+ L:
+ }
+ goto L /* ERROR "goto L jumps into block" */
+}
+
+func _() {
+ for i = range c {
+ L:
+ }
+ goto L /* ERROR "goto L jumps into block" */
+}
+
+func _() {
+ for i = range m {
+ L:
+ }
+ goto L /* ERROR "goto L jumps into block" */
+}
+
+func _() {
+ for i = range s {
+ L:
+ }
+ goto L /* ERROR "goto L jumps into block" */
+}
+
+// switch
+
+func _() {
+L:
+ switch i {
+ case 0:
+ goto L
+ }
+}
+
+func _() {
+L:
+ switch i {
+ case 0:
+
+ default:
+ goto L
+ }
+}
+
+func _() {
+ switch i {
+ case 0:
+
+ default:
+ L:
+ goto L
+ }
+}
+
+func _() {
+ switch i {
+ case 0:
+
+ default:
+ goto L
+ L:
+ }
+}
+
+func _() {
+ switch i {
+ case 0:
+ goto L
+ L:
+ ;
+ default:
+ }
+}
+
+func _() {
+ goto L /* ERROR "goto L jumps into block" */
+ switch i {
+ case 0:
+ L:
+ }
+}
+
+func _() {
+ goto L /* ERROR "goto L jumps into block" */
+ switch i {
+ case 0:
+ L:
+ ;
+ default:
+ }
+}
+
+func _() {
+ goto L /* ERROR "goto L jumps into block" */
+ switch i {
+ case 0:
+ default:
+ L:
+ }
+}
+
+func _() {
+ switch i {
+ default:
+ goto L /* ERROR "goto L jumps into block" */
+ case 0:
+ L:
+ }
+}
+
+func _() {
+ switch i {
+ case 0:
+ L:
+ ;
+ default:
+ goto L /* ERROR "goto L jumps into block" */
+ }
+}
+
+// select
+// different from switch. the statement has no implicit block around it.
+
+func _() {
+L:
+ select {
+ case <-c:
+ goto L
+ }
+}
+
+func _() {
+L:
+ select {
+ case c <- 1:
+
+ default:
+ goto L
+ }
+}
+
+func _() {
+ select {
+ case <-c:
+
+ default:
+ L:
+ goto L
+ }
+}
+
+func _() {
+ select {
+ case c <- 1:
+
+ default:
+ goto L
+ L:
+ }
+}
+
+func _() {
+ select {
+ case <-c:
+ goto L
+ L:
+ ;
+ default:
+ }
+}
+
+func _() {
+ goto L /* ERROR "goto L jumps into block" */
+ select {
+ case c <- 1:
+ L:
+ }
+}
+
+func _() {
+ goto L /* ERROR "goto L jumps into block" */
+ select {
+ case c <- 1:
+ L:
+ ;
+ default:
+ }
+}
+
+func _() {
+ goto L /* ERROR "goto L jumps into block" */
+ select {
+ case <-c:
+ default:
+ L:
+ }
+}
+
+func _() {
+ select {
+ default:
+ goto L /* ERROR "goto L jumps into block" */
+ case <-c:
+ L:
+ }
+}
+
+func _() {
+ select {
+ case <-c:
+ L:
+ ;
+ default:
+ goto L /* ERROR "goto L jumps into block" */
+ }
+}
diff --git a/llgo/third_party/go.tools/go/types/testdata/importdecl0a.src b/llgo/third_party/go.tools/go/types/testdata/importdecl0a.src
new file mode 100644
index 0000000000000000000000000000000000000000..463dcd083dd7ffa793ae2adf59a40fa953719fed
--- /dev/null
+++ b/llgo/third_party/go.tools/go/types/testdata/importdecl0a.src
@@ -0,0 +1,53 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package importdecl0
+
+import ()
+
+import (
+ // we can have multiple blank imports (was bug)
+ _ "math"
+ _ "net/rpc"
+ init /* ERROR "cannot declare init" */ "fmt"
+ // reflect defines a type "flag" which shows up in the gc export data
+ "reflect"
+ . /* ERROR "imported but not used" */ "reflect"
+)
+
+import "math" /* ERROR "imported but not used" */
+import m /* ERROR "imported but not used as m" */ "math"
+import _ "math"
+
+import (
+ "math/big" /* ERROR "imported but not used" */
+ b /* ERROR "imported but not used" */ "math/big"
+ _ "math/big"
+)
+
+import "fmt"
+import f1 "fmt"
+import f2 "fmt"
+
+// reflect.flag must not be visible in this package
+type flag int
+type _ reflect /* ERROR "not exported" */ .flag
+
+// imported package name may conflict with local objects
+type reflect /* ERROR "reflect already declared" */ int
+
+// dot-imported exported objects may conflict with local objects
+type Value /* ERROR "Value already declared through dot-import of package reflect" */ struct{}
+
+var _ = fmt.Println // use "fmt"
+
+func _() {
+ f1.Println() // use "fmt"
+}
+
+func _() {
+ _ = func() {
+ f2.Println() // use "fmt"
+ }
+}
diff --git a/llgo/third_party/go.tools/go/types/testdata/importdecl0b.src b/llgo/third_party/go.tools/go/types/testdata/importdecl0b.src
new file mode 100644
index 0000000000000000000000000000000000000000..6844e7098233eb5488c860c03f5a3274f1b467d5
--- /dev/null
+++ b/llgo/third_party/go.tools/go/types/testdata/importdecl0b.src
@@ -0,0 +1,33 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package importdecl0
+
+import "math"
+import m "math"
+
+import . "testing" // declares T in file scope
+import . /* ERROR "imported but not used" */ "unsafe"
+import . "fmt" // declares Println in file scope
+
+import (
+ // TODO(gri) At the moment, 2 errors are reported because both go/parser
+ // and the type checker report it. Eventually, this test should not be
+ // done by the parser anymore.
+ "" /* ERROR invalid import path */ /* ERROR invalid import path */
+ "a!b" /* ERROR invalid import path */ /* ERROR invalid import path */
+ "abc\xffdef" /* ERROR invalid import path */ /* ERROR invalid import path */
+)
+
+// using "math" in this file doesn't affect its use in other files
+const Pi0 = math.Pi
+const Pi1 = m.Pi
+
+type _ T // use "testing"
+
+func _() func() interface{} {
+ return func() interface{} {
+ return Println // use "fmt"
+ }
+}
diff --git a/llgo/third_party/go.tools/go/types/testdata/importdecl1a.src b/llgo/third_party/go.tools/go/types/testdata/importdecl1a.src
new file mode 100644
index 0000000000000000000000000000000000000000..8301820dda981b6ee4faf0afa53ed0e5e6085069
--- /dev/null
+++ b/llgo/third_party/go.tools/go/types/testdata/importdecl1a.src
@@ -0,0 +1,11 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Test case for issue 8969.
+
+package importdecl1
+
+import . "unsafe"
+
+var _ Pointer // use dot-imported package unsafe
diff --git a/llgo/third_party/go.tools/go/types/testdata/importdecl1b.src b/llgo/third_party/go.tools/go/types/testdata/importdecl1b.src
new file mode 100644
index 0000000000000000000000000000000000000000..f24bb9ade977749ce379a08594cf44194dff5b78
--- /dev/null
+++ b/llgo/third_party/go.tools/go/types/testdata/importdecl1b.src
@@ -0,0 +1,7 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package importdecl1
+
+import . /* ERROR "imported but not used" */ "unsafe"
diff --git a/llgo/third_party/go.tools/go/types/testdata/init0.src b/llgo/third_party/go.tools/go/types/testdata/init0.src
new file mode 100644
index 0000000000000000000000000000000000000000..ef0349c70f5ff053f71fbceaae659093d8610a22
--- /dev/null
+++ b/llgo/third_party/go.tools/go/types/testdata/init0.src
@@ -0,0 +1,106 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// initialization cycles
+
+package init0
+
+// initialization cycles (we don't know the types)
+const (
+ s0 /* ERROR initialization cycle */ = s0
+
+ x0 /* ERROR initialization cycle */ = y0
+ y0 = x0
+
+ a0 = b0
+ b0 /* ERROR initialization cycle */ = c0
+ c0 = d0
+ d0 = b0
+)
+
+var (
+ s1 /* ERROR initialization cycle */ = s1
+
+ x1 /* ERROR initialization cycle */ = y1
+ y1 = x1
+
+ a1 = b1
+ b1 /* ERROR initialization cycle */ = c1
+ c1 = d1
+ d1 = b1
+)
+
+// initialization cycles (we know the types)
+const (
+ s2 /* ERROR initialization cycle */ int = s2
+
+ x2 /* ERROR initialization cycle */ int = y2
+ y2 = x2
+
+ a2 = b2
+ b2 /* ERROR initialization cycle */ int = c2
+ c2 = d2
+ d2 = b2
+)
+
+var (
+ s3 /* ERROR initialization cycle */ int = s3
+
+ x3 /* ERROR initialization cycle */ int = y3
+ y3 = x3
+
+ a3 = b3
+ b3 /* ERROR initialization cycle */ int = c3
+ c3 = d3
+ d3 = b3
+)
+
+// cycles via struct fields
+
+type S1 struct {
+ f int
+}
+const cx3 S1 /* ERROR invalid constant type */ = S1{cx3.f}
+var vx3 /* ERROR initialization cycle */ S1 = S1{vx3.f}
+
+// cycles via functions
+
+var x4 = x5
+var x5 /* ERROR initialization cycle */ = f1()
+func f1() int { return x5*10 }
+
+var x6, x7 /* ERROR initialization cycle */ = f2()
+var x8 = x7
+func f2() (int, int) { return f3() + f3(), 0 }
+func f3() int { return x8 }
+
+// cycles via closures
+
+var x9 /* ERROR initialization cycle */ = func() int { return x9 }()
+
+var x10 /* ERROR initialization cycle */ = f4()
+
+func f4() int {
+ _ = func() {
+ _ = x10
+ }
+ return 0
+}
+
+// cycles via method expressions
+
+type T1 struct{}
+
+func (T1) m() bool { _ = x11; return false }
+
+var x11 /* ERROR initialization cycle */ = T1.m(T1{})
+
+// cycles via method values
+
+type T2 struct{}
+
+func (T2) m() bool { _ = x12; return false }
+
+var t1 T2
+var x12 /* ERROR initialization cycle */ = t1.m
diff --git a/llgo/third_party/go.tools/go/types/testdata/init1.src b/llgo/third_party/go.tools/go/types/testdata/init1.src
new file mode 100644
index 0000000000000000000000000000000000000000..39ca31466b5ebc0bc899b82d292c2f6e3ea32f43
--- /dev/null
+++ b/llgo/third_party/go.tools/go/types/testdata/init1.src
@@ -0,0 +1,97 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// initialization cycles
+
+package init1
+
+// issue 6683 (marked as WorkingAsIntended)
+
+type T0 struct{}
+
+func (T0) m() int { return y0 }
+
+var x0 = T0{}
+
+var y0 /* ERROR initialization cycle */ = x0.m()
+
+type T1 struct{}
+
+func (T1) m() int { return y1 }
+
+var x1 interface {
+ m() int
+} = T1{}
+
+var y1 = x1.m() // no cycle reported, x1 is of interface type
+
+// issue 6703 (modified)
+
+var x2 /* ERROR initialization cycle */ = T2.m
+
+var y2 = x2
+
+type T2 struct{}
+
+func (T2) m() int {
+ _ = y2
+ return 0
+}
+
+var x3 /* ERROR initialization cycle */ = T3.m(T3{}) // <<<< added (T3{})
+
+var y3 = x3
+
+type T3 struct{}
+
+func (T3) m() int {
+ _ = y3
+ return 0
+}
+
+var x4 /* ERROR initialization cycle */ = T4{}.m // <<<< added {}
+
+var y4 = x4
+
+type T4 struct{}
+
+func (T4) m() int {
+ _ = y4
+ return 0
+}
+
+var x5 /* ERROR initialization cycle */ = T5{}.m() // <<<< added ()
+
+var y5 = x5
+
+type T5 struct{}
+
+func (T5) m() int {
+ _ = y5
+ return 0
+}
+
+// issue 4847
+// simplified test case
+
+var x6 = f6
+var y6 /* ERROR initialization cycle */ = f6
+func f6() { _ = y6 }
+
+// full test case
+
+type (
+ E int
+ S int
+)
+
+type matcher func(s *S) E
+
+func matchList(s *S) E { return matcher(matchAnyFn)(s) }
+
+var foo = matcher(matchList)
+
+var matchAny /* ERROR initialization cycle */ = matcher(matchList)
+
+func matchAnyFn(s *S) (err E) { return matchAny(s) }
\ No newline at end of file
diff --git a/llgo/third_party/go.tools/go/types/testdata/init2.src b/llgo/third_party/go.tools/go/types/testdata/init2.src
new file mode 100644
index 0000000000000000000000000000000000000000..614db6c949147bd7a4c78bd8e3a5fc4ae5916856
--- /dev/null
+++ b/llgo/third_party/go.tools/go/types/testdata/init2.src
@@ -0,0 +1,139 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// initialization cycles
+
+package init2
+
+// cycles through functions
+
+func f1() int { _ = x1; return 0 }
+var x1 /* ERROR initialization cycle */ = f1
+
+func f2() int { _ = x2; return 0 }
+var x2 /* ERROR initialization cycle */ = f2()
+
+// cycles through method expressions
+
+type T3 int
+func (T3) m() int { _ = x3; return 0 }
+var x3 /* ERROR initialization cycle */ = T3.m
+
+type T4 int
+func (T4) m() int { _ = x4; return 0 }
+var x4 /* ERROR initialization cycle */ = T4.m(0)
+
+type T3p int
+func (*T3p) m() int { _ = x3p; return 0 }
+var x3p /* ERROR initialization cycle */ = (*T3p).m
+
+type T4p int
+func (*T4p) m() int { _ = x4p; return 0 }
+var x4p /* ERROR initialization cycle */ = (*T4p).m(nil)
+
+// cycles through method expressions of embedded methods
+
+type T5 struct { E5 }
+type E5 int
+func (E5) m() int { _ = x5; return 0 }
+var x5 /* ERROR initialization cycle */ = T5.m
+
+type T6 struct { E6 }
+type E6 int
+func (E6) m() int { _ = x6; return 0 }
+var x6 /* ERROR initialization cycle */ = T6.m(T6{0})
+
+type T5p struct { E5p }
+type E5p int
+func (*E5p) m() int { _ = x5p; return 0 }
+var x5p /* ERROR initialization cycle */ = (*T5p).m
+
+type T6p struct { E6p }
+type E6p int
+func (*E6p) m() int { _ = x6p; return 0 }
+var x6p /* ERROR initialization cycle */ = (*T6p).m(nil)
+
+// cycles through method values
+
+type T7 int
+func (T7) m() int { _ = x7; return 0 }
+var x7 /* ERROR initialization cycle */ = T7(0).m
+
+type T8 int
+func (T8) m() int { _ = x8; return 0 }
+var x8 /* ERROR initialization cycle */ = T8(0).m()
+
+type T7p int
+func (*T7p) m() int { _ = x7p; return 0 }
+var x7p /* ERROR initialization cycle */ = new(T7p).m
+
+type T8p int
+func (*T8p) m() int { _ = x8p; return 0 }
+var x8p /* ERROR initialization cycle */ = new(T8p).m()
+
+type T7v int
+func (T7v) m() int { _ = x7v; return 0 }
+var x7var T7v
+var x7v /* ERROR initialization cycle */ = x7var.m
+
+type T8v int
+func (T8v) m() int { _ = x8v; return 0 }
+var x8var T8v
+var x8v /* ERROR initialization cycle */ = x8var.m()
+
+type T7pv int
+func (*T7pv) m() int { _ = x7pv; return 0 }
+var x7pvar *T7pv
+var x7pv /* ERROR initialization cycle */ = x7pvar.m
+
+type T8pv int
+func (*T8pv) m() int { _ = x8pv; return 0 }
+var x8pvar *T8pv
+var x8pv /* ERROR initialization cycle */ = x8pvar.m()
+
+// cycles through method values of embedded methods
+
+type T9 struct { E9 }
+type E9 int
+func (E9) m() int { _ = x9; return 0 }
+var x9 /* ERROR initialization cycle */ = T9{0}.m
+
+type T10 struct { E10 }
+type E10 int
+func (E10) m() int { _ = x10; return 0 }
+var x10 /* ERROR initialization cycle */ = T10{0}.m()
+
+type T9p struct { E9p }
+type E9p int
+func (*E9p) m() int { _ = x9p; return 0 }
+var x9p /* ERROR initialization cycle */ = new(T9p).m
+
+type T10p struct { E10p }
+type E10p int
+func (*E10p) m() int { _ = x10p; return 0 }
+var x10p /* ERROR initialization cycle */ = new(T10p).m()
+
+type T9v struct { E9v }
+type E9v int
+func (E9v) m() int { _ = x9v; return 0 }
+var x9var T9v
+var x9v /* ERROR initialization cycle */ = x9var.m
+
+type T10v struct { E10v }
+type E10v int
+func (E10v) m() int { _ = x10v; return 0 }
+var x10var T10v
+var x10v /* ERROR initialization cycle */ = x10var.m()
+
+type T9pv struct { E9pv }
+type E9pv int
+func (*E9pv) m() int { _ = x9pv; return 0 }
+var x9pvar *T9pv
+var x9pv /* ERROR initialization cycle */ = x9pvar.m
+
+type T10pv struct { E10pv }
+type E10pv int
+func (*E10pv) m() int { _ = x10pv; return 0 }
+var x10pvar *T10pv
+var x10pv /* ERROR initialization cycle */ = x10pvar.m()
diff --git a/llgo/third_party/go.tools/go/types/testdata/issues.src b/llgo/third_party/go.tools/go/types/testdata/issues.src
new file mode 100644
index 0000000000000000000000000000000000000000..58c450f82f1ae01ac3e454d392a8b1e18a87637b
--- /dev/null
+++ b/llgo/third_party/go.tools/go/types/testdata/issues.src
@@ -0,0 +1,37 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package issues
+
+import "fmt"
+
+func issue7035() {
+ type T struct{ X int }
+ _ = func() {
+ fmt.Println() // must refer to imported fmt rather than the fmt below
+ }
+ fmt := new(T)
+ _ = fmt.X
+}
+
+func issue8066() {
+ const (
+ // TODO(gri) Enable test below for releases 1.4 and higher
+ // _ = float32(340282356779733661637539395458142568447)
+ _ = float32(340282356779733661637539395458142568448 /* ERROR cannot convert */ )
+ )
+}
+
+// Check that a missing identifier doesn't lead to a spurious error cascade.
+func issue8799a() {
+ x, ok := missing /* ERROR undeclared */ ()
+ _ = !ok
+ _ = x
+}
+
+func issue8799b(x int, ok bool) {
+ x, ok = missing /* ERROR undeclared */ ()
+ _ = !ok
+ _ = x
+}
diff --git a/llgo/third_party/go.tools/go/types/testdata/labels.src b/llgo/third_party/go.tools/go/types/testdata/labels.src
new file mode 100644
index 0000000000000000000000000000000000000000..102ffc7c17b015726b0acd1b9a9836f2cd5c5ff2
--- /dev/null
+++ b/llgo/third_party/go.tools/go/types/testdata/labels.src
@@ -0,0 +1,207 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file is a modified concatenation of the files
+// $GOROOT/test/label.go and $GOROOT/test/label1.go.
+
+package labels
+
+var x int
+
+func f0() {
+L1 /* ERROR "label L1 declared but not used" */ :
+ for {
+ }
+L2 /* ERROR "label L2 declared but not used" */ :
+ select {
+ }
+L3 /* ERROR "label L3 declared but not used" */ :
+ switch {
+ }
+L4 /* ERROR "label L4 declared but not used" */ :
+ if true {
+ }
+L5 /* ERROR "label L5 declared but not used" */ :
+ f0()
+L6:
+ f0()
+L6 /* ERROR "label L6 already declared" */ :
+ f0()
+ if x == 20 {
+ goto L6
+ }
+
+L7:
+ for {
+ break L7
+ break L8 /* ERROR "invalid break label L8" */
+ }
+
+// A label must be directly associated with a switch, select, or
+// for statement; it cannot be the label of a labeled statement.
+
+L7a /* ERROR "declared but not used" */ : L7b:
+ for {
+ break L7a /* ERROR "invalid break label L7a" */
+ continue L7a /* ERROR "invalid continue label L7a" */
+ continue L7b
+ }
+
+L8:
+ for {
+ if x == 21 {
+ continue L8
+ continue L7 /* ERROR "invalid continue label L7" */
+ }
+ }
+
+L9:
+ switch {
+ case true:
+ break L9
+ defalt /* ERROR "label defalt declared but not used" */ :
+ }
+
+L10:
+ select {
+ default:
+ break L10
+ break L9 /* ERROR "invalid break label L9" */
+ }
+
+ goto L10a
+L10a: L10b:
+ select {
+ default:
+ break L10a /* ERROR "invalid break label L10a" */
+ break L10b
+ continue L10b /* ERROR "invalid continue label L10b" */
+ }
+}
+
+func f1() {
+L1:
+ for {
+ if x == 0 {
+ break L1
+ }
+ if x == 1 {
+ continue L1
+ }
+ goto L1
+ }
+
+L2:
+ select {
+ default:
+ if x == 0 {
+ break L2
+ }
+ if x == 1 {
+ continue L2 /* ERROR "invalid continue label L2" */
+ }
+ goto L2
+ }
+
+L3:
+ switch {
+ case x > 10:
+ if x == 11 {
+ break L3
+ }
+ if x == 12 {
+ continue L3 /* ERROR "invalid continue label L3" */
+ }
+ goto L3
+ }
+
+L4:
+ if true {
+ if x == 13 {
+ break L4 /* ERROR "invalid break label L4" */
+ }
+ if x == 14 {
+ continue L4 /* ERROR "invalid continue label L4" */
+ }
+ if x == 15 {
+ goto L4
+ }
+ }
+
+L5:
+ f1()
+ if x == 16 {
+ break L5 /* ERROR "invalid break label L5" */
+ }
+ if x == 17 {
+ continue L5 /* ERROR "invalid continue label L5" */
+ }
+ if x == 18 {
+ goto L5
+ }
+
+ for {
+ if x == 19 {
+ break L1 /* ERROR "invalid break label L1" */
+ }
+ if x == 20 {
+ continue L1 /* ERROR "invalid continue label L1" */
+ }
+ if x == 21 {
+ goto L1
+ }
+ }
+}
+
+// Additional tests not in the original files.
+
+func f2() {
+L1 /* ERROR "label L1 declared but not used" */ :
+ if x == 0 {
+ for {
+ continue L1 /* ERROR "invalid continue label L1" */
+ }
+ }
+}
+
+func f3() {
+L1:
+L2:
+L3:
+ for {
+ break L1 /* ERROR "invalid break label L1" */
+ break L2 /* ERROR "invalid break label L2" */
+ break L3
+ continue L1 /* ERROR "invalid continue label L1" */
+ continue L2 /* ERROR "invalid continue label L2" */
+ continue L3
+ goto L1
+ goto L2
+ goto L3
+ }
+}
+
+// Blank labels are never declared.
+
+func f4() {
+_:
+_: // multiple blank labels are ok
+ goto _ /* ERROR "label _ not declared" */
+}
+
+func f5() {
+_:
+ for {
+ break _ /* ERROR "invalid break label _" */
+ continue _ /* ERROR "invalid continue label _" */
+ }
+}
+
+func f6() {
+_:
+ switch {
+ default:
+ break _ /* ERROR "invalid break label _" */
+ }
+}
diff --git a/llgo/third_party/go.tools/go/types/testdata/methodsets.src b/llgo/third_party/go.tools/go/types/testdata/methodsets.src
new file mode 100644
index 0000000000000000000000000000000000000000..89211468ead01f98e89c470914e1f870f5e1ad6d
--- /dev/null
+++ b/llgo/third_party/go.tools/go/types/testdata/methodsets.src
@@ -0,0 +1,214 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package methodsets
+
+type T0 struct {}
+
+func (T0) v0() {}
+func (*T0) p0() {}
+
+type T1 struct {} // like T0 with different method names
+
+func (T1) v1() {}
+func (*T1) p1() {}
+
+type T2 interface {
+ v2()
+ p2()
+}
+
+type T3 struct {
+ T0
+ *T1
+ T2
+}
+
+// Method expressions
+func _() {
+ var (
+ _ func(T0) = T0.v0
+ _ = T0 /* ERROR "not in method set" */ .p0
+
+ _ func (*T0) = (*T0).v0
+ _ func (*T0) = (*T0).p0
+
+ // T1 is like T0
+
+ _ func(T2) = T2.v2
+ _ func(T2) = T2.p2
+
+ _ func(T3) = T3.v0
+ _ func(T3) = T3 /* ERROR "not in method set" */ .p0
+ _ func(T3) = T3.v1
+ _ func(T3) = T3.p1
+ _ func(T3) = T3.v2
+ _ func(T3) = T3.p2
+
+ _ func(*T3) = (*T3).v0
+ _ func(*T3) = (*T3).p0
+ _ func(*T3) = (*T3).v1
+ _ func(*T3) = (*T3).p1
+ _ func(*T3) = (*T3).v2
+ _ func(*T3) = (*T3).p2
+ )
+}
+
+// Method values with addressable receivers
+func _() {
+ var (
+ v0 T0
+ _ func() = v0.v0
+ _ func() = v0.p0
+ )
+
+ var (
+ p0 *T0
+ _ func() = p0.v0
+ _ func() = p0.p0
+ )
+
+ // T1 is like T0
+
+ var (
+ v2 T2
+ _ func() = v2.v2
+ _ func() = v2.p2
+ )
+
+ var (
+ v4 T3
+ _ func() = v4.v0
+ _ func() = v4.p0
+ _ func() = v4.v1
+ _ func() = v4.p1
+ _ func() = v4.v2
+ _ func() = v4.p2
+ )
+
+ var (
+ p4 *T3
+ _ func() = p4.v0
+ _ func() = p4.p0
+ _ func() = p4.v1
+ _ func() = p4.p1
+ _ func() = p4.v2
+ _ func() = p4.p2
+ )
+}
+
+// Method calls with addressable receivers
+func _() {
+ var v0 T0
+ v0.v0()
+ v0.p0()
+
+ var p0 *T0
+ p0.v0()
+ p0.p0()
+
+ // T1 is like T0
+
+ var v2 T2
+ v2.v2()
+ v2.p2()
+
+ var v4 T3
+ v4.v0()
+ v4.p0()
+ v4.v1()
+ v4.p1()
+ v4.v2()
+ v4.p2()
+
+ var p4 *T3
+ p4.v0()
+ p4.p0()
+ p4.v1()
+ p4.p1()
+ p4.v2()
+ p4.p2()
+}
+
+// Method values with value receivers
+func _() {
+ var (
+ _ func() = T0{}.v0
+ _ func() = T0 /* ERROR "not in method set" */ {}.p0
+
+ _ func() = (&T0{}).v0
+ _ func() = (&T0{}).p0
+
+ // T1 is like T0
+
+ // no values for T2
+
+ _ func() = T3{}.v0
+ _ func() = T3 /* ERROR "not in method set" */ {}.p0
+ _ func() = T3{}.v1
+ _ func() = T3{}.p1
+ _ func() = T3{}.v2
+ _ func() = T3{}.p2
+
+ _ func() = (&T3{}).v0
+ _ func() = (&T3{}).p0
+ _ func() = (&T3{}).v1
+ _ func() = (&T3{}).p1
+ _ func() = (&T3{}).v2
+ _ func() = (&T3{}).p2
+ )
+}
+
+// Method calls with value receivers
+func _() {
+ T0{}.v0()
+ T0 /* ERROR "not in method set" */ {}.p0()
+
+ (&T0{}).v0()
+ (&T0{}).p0()
+
+ // T1 is like T0
+
+ // no values for T2
+
+ T3{}.v0()
+ T3 /* ERROR "not in method set" */ {}.p0()
+ T3{}.v1()
+ T3{}.p1()
+ T3{}.v2()
+ T3{}.p2()
+
+ (&T3{}).v0()
+ (&T3{}).p0()
+ (&T3{}).v1()
+ (&T3{}).p1()
+ (&T3{}).v2()
+ (&T3{}).p2()
+}
+
+// *T has no methods if T is an interface type
+func issue5918() {
+ var (
+ err error
+ _ = err.Error()
+ _ func() string = err.Error
+ _ func(error) string = error.Error
+
+ perr = &err
+ _ = perr /* ERROR "no field or method" */ .Error()
+ _ func() string = perr /* ERROR "no field or method" */ .Error
+ _ func(*error) string = ( /* ERROR "no field or method" */ *error).Error
+ )
+
+ type T *interface{ m() int }
+ var (
+ x T
+ _ = (*x).m()
+ _ = (*x).m
+
+ _ = x /* ERROR "no field or method" */ .m()
+ _ = x /* ERROR "no field or method" */ .m
+ _ = T /* ERROR "no field or method" */ .m
+ )
+}
diff --git a/llgo/third_party/go.tools/go/types/testdata/shifts.src b/llgo/third_party/go.tools/go/types/testdata/shifts.src
new file mode 100644
index 0000000000000000000000000000000000000000..7f8ed06fbfabc0a62a4c68bdaf544bc1b66194a6
--- /dev/null
+++ b/llgo/third_party/go.tools/go/types/testdata/shifts.src
@@ -0,0 +1,321 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package shifts
+
+func shifts0() {
+ // basic constant shifts
+ const (
+ s = 10
+ _ = 0<<0
+ _ = 1<<s
+ )
+}
+
+func shifts4() {
+ // shifts in comparisons w/ untyped operands
+ var s uint
+
+}
diff --git a/llgo/third_party/go.tools/go/types/type.go b/llgo/third_party/go.tools/go/types/type.go
new file mode 100644
--- /dev/null
+++ b/llgo/third_party/go.tools/go/types/type.go
+// NewStruct returns a new struct with the given fields and corresponding field tags.
+func NewStruct(fields []*Var, tags []string) *Struct {
+ if len(tags) > len(fields) {
+ panic("more tags than fields")
+ }
+ return &Struct{fields: fields, tags: tags}
+}
+
+// NumFields returns the number of fields in the struct (including blank and anonymous fields).
+func (s *Struct) NumFields() int { return len(s.fields) }
+
+// Field returns the i'th field for 0 <= i < NumFields().
+func (s *Struct) Field(i int) *Var { return s.fields[i] }
+
+// Tag returns the i'th field tag for 0 <= i < NumFields().
+func (s *Struct) Tag(i int) string {
+ if i < len(s.tags) {
+ return s.tags[i]
+ }
+ return ""
+}
+
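A minimal usage sketch for the Struct accessors above, written as if it were an example file inside this package; the unqualified NewField and Typ helpers are assumed from parts of the package not shown in this excerpt, and the field layout is invented for illustration.

package types

import (
	"fmt"
	"go/token"
)

func ExampleStruct() {
	// struct { Name string "json:\"name\""; Age int }
	fields := []*Var{
		NewField(token.NoPos, nil, "Name", Typ[String], false),
		NewField(token.NoPos, nil, "Age", Typ[Int], false),
	}
	s := NewStruct(fields, []string{`json:"name"`}) // tags may be shorter than fields
	fmt.Println(s.NumFields()) // 2
	fmt.Println(s.Tag(0))      // json:"name"
	fmt.Println(s.Tag(1))      // "" (no tag recorded past the first field)
}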
+// A Pointer represents a pointer type.
+type Pointer struct {
+ base Type // element type
+}
+
+// NewPointer returns a new pointer type for the given element (base) type.
+func NewPointer(elem Type) *Pointer { return &Pointer{base: elem} }
+
+// Elem returns the element type for the given pointer p.
+func (p *Pointer) Elem() Type { return p.base }
+
+// A Tuple represents an ordered list of variables; a nil *Tuple is a valid (empty) tuple.
+// Tuples are used as components of signatures and to represent the type of multiple
+// assignments; they are not first class types of Go.
+type Tuple struct {
+ vars []*Var
+}
+
+// NewTuple returns a new tuple for the given variables.
+func NewTuple(x ...*Var) *Tuple {
+ if len(x) > 0 {
+ return &Tuple{x}
+ }
+ return nil
+}
+
+// Len returns the number of variables of tuple t.
+func (t *Tuple) Len() int {
+ if t != nil {
+ return len(t.vars)
+ }
+ return 0
+}
+
+// At returns the i'th variable of tuple t.
+func (t *Tuple) At(i int) *Var { return t.vars[i] }
+
+// A Signature represents a (non-builtin) function or method type.
+type Signature struct {
+ scope *Scope // function scope, always present
+ recv *Var // nil if not a method
+ params *Tuple // (incoming) parameters from left to right; or nil
+ results *Tuple // (outgoing) results from left to right; or nil
+ variadic bool // true if the last parameter's type is of the form ...T (or string, for append built-in only)
+}
+
+// NewSignature returns a new function type for the given receiver, parameters,
+// and results, any of which may be nil. If variadic is set, the function
+// is variadic, it must have at least one parameter, and the last parameter
+// must be of unnamed slice type.
+func NewSignature(scope *Scope, recv *Var, params, results *Tuple, variadic bool) *Signature {
+ // TODO(gri) Should we rely on the correct (non-nil) incoming scope
+ // or should this function allocate and populate a scope?
+ if variadic {
+ n := params.Len()
+ if n == 0 {
+ panic("types.NewSignature: variadic function must have at least one parameter")
+ }
+ if _, ok := params.At(n - 1).typ.(*Slice); !ok {
+ panic("types.NewSignature: variadic parameter must be of unnamed slice type")
+ }
+ }
+ return &Signature{scope, recv, params, results, variadic}
+}
+
+// Recv returns the receiver of signature s (if a method), or nil if a
+// function.
+//
+// For an abstract method, Recv returns the enclosing interface either
+// as a *Named or an *Interface. Due to embedding, an interface may
+// contain methods whose receiver type is a different interface.
+func (s *Signature) Recv() *Var { return s.recv }
+
+// Params returns the parameters of signature s, or nil.
+func (s *Signature) Params() *Tuple { return s.params }
+
+// Results returns the results of signature s, or nil.
+func (s *Signature) Results() *Tuple { return s.results }
+
+// Variadic reports whether the signature s is variadic.
+func (s *Signature) Variadic() bool { return s.variadic }
+
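Continuing the same in-package sketch (same assumed preamble as the struct example; NewSlice comes from the slice section of this file, which is missing from this excerpt), tuples and signatures compose as follows. The nil scope and receiver are placeholders used only for the illustration.

func ExampleSignature() {
	// func(x int, tail ...string) bool
	params := NewTuple(
		NewVar(token.NoPos, nil, "x", Typ[Int]),
		NewVar(token.NoPos, nil, "tail", NewSlice(Typ[String])),
	)
	results := NewTuple(NewVar(token.NoPos, nil, "", Typ[Bool]))
	sig := NewSignature(nil, nil, params, results, true) // variadic: last parameter is an unnamed slice
	fmt.Println(sig.Params().Len(), sig.Variadic()) // 2 true
	fmt.Println(sig)                                // func(x int, tail ...string) bool
}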
+// An Interface represents an interface type.
+type Interface struct {
+ methods []*Func // ordered list of explicitly declared methods
+ embeddeds []*Named // ordered list of explicitly embedded types
+
+ allMethods []*Func // ordered list of methods declared with or embedded in this interface (TODO(gri): replace with mset)
+}
+
+// NewInterface returns a new interface for the given methods and embedded types.
+func NewInterface(methods []*Func, embeddeds []*Named) *Interface {
+ typ := new(Interface)
+
+ var mset objset
+ for _, m := range methods {
+ if mset.insert(m) != nil {
+ panic("multiple methods with the same name")
+ }
+ // set receiver
+ // TODO(gri) Ideally, we should use a named type here instead of
+ // typ, for less verbose printing of interface method signatures.
+ m.typ.(*Signature).recv = NewVar(m.pos, m.pkg, "", typ)
+ }
+ sort.Sort(byUniqueMethodName(methods))
+
+ if embeddeds != nil {
+ sort.Sort(byUniqueTypeName(embeddeds))
+ }
+
+ typ.methods = methods
+ typ.embeddeds = embeddeds
+ return typ
+}
+
+// NumExplicitMethods returns the number of explicitly declared methods of interface t.
+func (t *Interface) NumExplicitMethods() int { return len(t.methods) }
+
+// ExplicitMethod returns the i'th explicitly declared method of interface t for 0 <= i < t.NumExplicitMethods().
+// The methods are ordered by their unique Id.
+func (t *Interface) ExplicitMethod(i int) *Func { return t.methods[i] }
+
+// NumEmbeddeds returns the number of embedded types in interface t.
+func (t *Interface) NumEmbeddeds() int { return len(t.embeddeds) }
+
+// Embedded returns the i'th embedded type of interface t for 0 <= i < t.NumEmbeddeds().
+// The types are ordered by the corresponding TypeName's unique Id.
+func (t *Interface) Embedded(i int) *Named { return t.embeddeds[i] }
+
+// NumMethods returns the total number of methods of interface t.
+func (t *Interface) NumMethods() int { return len(t.allMethods) }
+
+// Method returns the i'th method of interface t for 0 <= i < t.NumMethods().
+// The methods are ordered by their unique Id.
+func (t *Interface) Method(i int) *Func { return t.allMethods[i] }
+
+// Empty returns true if t is the empty interface.
+func (t *Interface) Empty() bool { return len(t.allMethods) == 0 }
+
+// Complete computes the interface's method set. It must be called by users of
+// NewInterface after the interface's embedded types are fully defined and
+// before using the interface type in any way other than to form other types.
+// Complete returns the receiver.
+func (t *Interface) Complete() *Interface {
+ if t.allMethods != nil {
+ return t
+ }
+
+ var allMethods []*Func
+ if t.embeddeds == nil {
+ if t.methods == nil {
+ allMethods = make([]*Func, 0, 1)
+ } else {
+ allMethods = t.methods
+ }
+ } else {
+ allMethods = append(allMethods, t.methods...)
+ for _, et := range t.embeddeds {
+ it := et.Underlying().(*Interface)
+ it.Complete()
+ for _, tm := range it.allMethods {
+ // Make a copy of the method and adjust its receiver type.
+ newm := *tm
+ newmtyp := *tm.typ.(*Signature)
+ newm.typ = &newmtyp
+ newmtyp.recv = NewVar(newm.pos, newm.pkg, "", t)
+ allMethods = append(allMethods, &newm)
+ }
+ }
+ sort.Sort(byUniqueMethodName(allMethods))
+ }
+ t.allMethods = allMethods
+
+ return t
+}
+
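A similar hedged sketch for interfaces (same assumed preamble; NewFunc is defined in the package's object declarations, not in this file). Complete is called once, after the method list is final, as the comment above requires.

func ExampleInterface() {
	// interface { Error() string }
	results := NewTuple(NewVar(token.NoPos, nil, "", Typ[String]))
	m := NewFunc(token.NoPos, nil, "Error", NewSignature(nil, nil, nil, results, false))
	iface := NewInterface([]*Func{m}, nil).Complete() // no embedded types in this example
	fmt.Println(iface.NumExplicitMethods(), iface.NumMethods(), iface.Empty()) // 1 1 false
}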
+// A Map represents a map type.
+type Map struct {
+ key, elem Type
+}
+
+// NewMap returns a new map for the given key and element types.
+func NewMap(key, elem Type) *Map {
+ return &Map{key, elem}
+}
+
+// Key returns the key type of map m.
+func (m *Map) Key() Type { return m.key }
+
+// Elem returns the element type of map m.
+func (m *Map) Elem() Type { return m.elem }
+
+// A Chan represents a channel type.
+type Chan struct {
+ dir ChanDir
+ elem Type
+}
+
+// A ChanDir value indicates a channel direction.
+type ChanDir int
+
+// The direction of a channel is indicated by one of the following constants.
+const (
+ SendRecv ChanDir = iota
+ SendOnly
+ RecvOnly
+)
+
+// NewChan returns a new channel type for the given direction and element type.
+func NewChan(dir ChanDir, elem Type) *Chan {
+ return &Chan{dir, elem}
+}
+
+// Dir returns the direction of channel c.
+func (c *Chan) Dir() ChanDir { return c.dir }
+
+// Elem returns the element type of channel c.
+func (c *Chan) Elem() Type { return c.elem }
+
+// A Named represents a named type.
+type Named struct {
+ obj *TypeName // corresponding declared object
+ underlying Type // possibly a *Named during setup; never a *Named once set up completely
+ methods []*Func // methods declared for this type (not the method set of this type)
+}
+
+// NewNamed returns a new named type for the given type name, underlying type, and associated methods.
+// The underlying type must not be a *Named.
+func NewNamed(obj *TypeName, underlying Type, methods []*Func) *Named {
+ if _, ok := underlying.(*Named); ok {
+ panic("types.NewNamed: underlying type must not be *Named")
+ }
+ typ := &Named{obj: obj, underlying: underlying, methods: methods}
+ if obj.typ == nil {
+ obj.typ = typ
+ }
+ return typ
+}
+
+// Obj returns the type name for the named type t.
+func (t *Named) Obj() *TypeName { return t.obj }
+
+// NumMethods returns the number of explicit methods whose receiver is named type t.
+func (t *Named) NumMethods() int { return len(t.methods) }
+
+// Method returns the i'th method of named type t for 0 <= i < t.NumMethods().
+func (t *Named) Method(i int) *Func { return t.methods[i] }
+
+// SetUnderlying sets the underlying type and marks t as complete.
+// TODO(gri) determine if there's a better solution rather than providing this function
+func (t *Named) SetUnderlying(underlying Type) {
+ if underlying == nil {
+ panic("types.Named.SetUnderlying: underlying type must not be nil")
+ }
+ if _, ok := underlying.(*Named); ok {
+ panic("types.Named.SetUnderlying: underlying type must not be *Named")
+ }
+ t.underlying = underlying
+}
+
+// AddMethod adds method m unless it is already in the method list.
+// TODO(gri) find a better solution instead of providing this function
+func (t *Named) AddMethod(m *Func) {
+ if i, _ := lookupMethod(t.methods, m.pkg, m.name); i < 0 {
+ t.methods = append(t.methods, m)
+ }
+}
+
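And a sketch for named types (same assumed preamble; NewTypeName and NewFunc are assumptions from the package's object declarations):

func ExampleNamed() {
	// type Celsius float64
	obj := NewTypeName(token.NoPos, nil, "Celsius", nil)
	celsius := NewNamed(obj, Typ[Float64], nil) // also sets obj's type, which was still nil
	fmt.Println(celsius.Obj().Name(), celsius.Underlying()) // Celsius float64

	// Methods can be attached after construction; AddMethod skips duplicates.
	strSig := NewSignature(nil, nil, nil, NewTuple(NewVar(token.NoPos, nil, "", Typ[String])), false)
	celsius.AddMethod(NewFunc(token.NoPos, nil, "String", strSig))
	fmt.Println(celsius.NumMethods()) // 1
}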
+// Implementations for Type methods.
+
+func (t *Basic) Underlying() Type { return t }
+func (t *Array) Underlying() Type { return t }
+func (t *Slice) Underlying() Type { return t }
+func (t *Struct) Underlying() Type { return t }
+func (t *Pointer) Underlying() Type { return t }
+func (t *Tuple) Underlying() Type { return t }
+func (t *Signature) Underlying() Type { return t }
+func (t *Interface) Underlying() Type { return t }
+func (t *Map) Underlying() Type { return t }
+func (t *Chan) Underlying() Type { return t }
+func (t *Named) Underlying() Type { return t.underlying }
+
+func (t *Basic) String() string { return TypeString(nil, t) }
+func (t *Array) String() string { return TypeString(nil, t) }
+func (t *Slice) String() string { return TypeString(nil, t) }
+func (t *Struct) String() string { return TypeString(nil, t) }
+func (t *Pointer) String() string { return TypeString(nil, t) }
+func (t *Tuple) String() string { return TypeString(nil, t) }
+func (t *Signature) String() string { return TypeString(nil, t) }
+func (t *Interface) String() string { return TypeString(nil, t) }
+func (t *Map) String() string { return TypeString(nil, t) }
+func (t *Chan) String() string { return TypeString(nil, t) }
+func (t *Named) String() string { return TypeString(nil, t) }
diff --git a/llgo/third_party/go.tools/go/types/typestring.go b/llgo/third_party/go.tools/go/types/typestring.go
new file mode 100644
index 0000000000000000000000000000000000000000..9a537e8177372a73bee0b86c917bcadea16ecc86
--- /dev/null
+++ b/llgo/third_party/go.tools/go/types/typestring.go
@@ -0,0 +1,266 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements printing of types.
+
+package types
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// If GcCompatibilityMode is set, printing of types is modified
+// to match the representation of some types in the gc compiler:
+//
+// - byte and rune lose their alias name and simply stand for
+// uint8 and int32 respectively
+// - embedded interfaces get flattened (the embedding info is lost,
+// and certain recursive interface types cannot be printed anymore)
+//
+// This makes it easier to compare packages computed with the type-
+// checker vs packages imported from gc export data.
+//
+// Caution: This flag affects all uses of WriteType, globally.
+// It is only provided for testing in conjunction with
+// gc-generated data. It may be removed at any time.
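+//
+// For example, with GcCompatibilityMode set, a type such as
+// map[rune][]byte is printed as map[int32][]uint8.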
+var GcCompatibilityMode bool
+
+// TypeString returns the string representation of typ.
+// Named types are printed package-qualified if they
+// do not belong to this package.
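+// For example, for a type T declared in package p, TypeString(nil, T)
+// yields "p.T" while TypeString(p, T) yields "T".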
+func TypeString(this *Package, typ Type) string {
+ var buf bytes.Buffer
+ WriteType(&buf, this, typ)
+ return buf.String()
+}
+
+// WriteType writes the string representation of typ to buf.
+// Named types are printed package-qualified if they
+// do not belong to this package.
+func WriteType(buf *bytes.Buffer, this *Package, typ Type) {
+ writeType(buf, this, typ, make([]Type, 8))
+}
+
+func writeType(buf *bytes.Buffer, this *Package, typ Type, visited []Type) {
+ // Theoretically, this is a quadratic lookup algorithm, but in
+ // practice deeply nested composite types with unnamed component
+ // types are uncommon. This code is likely more efficient than
+ // using a map.
+ for _, t := range visited {
+ if t == typ {
+ fmt.Fprintf(buf, "○%T", typ) // cycle to typ
+ return
+ }
+ }
+ visited = append(visited, typ)
+
+ switch t := typ.(type) {
+ case nil:
+ buf.WriteString("")
+
+ case *Basic:
+ if t.kind == UnsafePointer {
+ buf.WriteString("unsafe.")
+ }
+ if GcCompatibilityMode {
+ // forget the alias names
+ switch t.kind {
+ case Byte:
+ t = Typ[Uint8]
+ case Rune:
+ t = Typ[Int32]
+ }
+ }
+ buf.WriteString(t.name)
+
+ case *Array:
+ fmt.Fprintf(buf, "[%d]", t.len)
+ writeType(buf, this, t.elem, visited)
+
+ case *Slice:
+ buf.WriteString("[]")
+ writeType(buf, this, t.elem, visited)
+
+ case *Struct:
+ buf.WriteString("struct{")
+ for i, f := range t.fields {
+ if i > 0 {
+ buf.WriteString("; ")
+ }
+ if !f.anonymous {
+ buf.WriteString(f.name)
+ buf.WriteByte(' ')
+ }
+ writeType(buf, this, f.typ, visited)
+ if tag := t.Tag(i); tag != "" {
+ fmt.Fprintf(buf, " %q", tag)
+ }
+ }
+ buf.WriteByte('}')
+
+ case *Pointer:
+ buf.WriteByte('*')
+ writeType(buf, this, t.base, visited)
+
+ case *Tuple:
+ writeTuple(buf, this, t, false, visited)
+
+ case *Signature:
+ buf.WriteString("func")
+ writeSignature(buf, this, t, visited)
+
+ case *Interface:
+ // We write the source-level methods and embedded types rather
+ // than the actual method set since resolved method signatures
+ // may have non-printable cycles if parameters have anonymous
+ // interface types that (directly or indirectly) embed the
+ // current interface. For instance, consider the result type
+ // of m:
+ //
+ // type T interface{
+ // m() interface{ T }
+ // }
+ //
+ buf.WriteString("interface{")
+ if GcCompatibilityMode {
+ // print flattened interface
+ // (useful to compare against gc-generated interfaces)
+ for i, m := range t.allMethods {
+ if i > 0 {
+ buf.WriteString("; ")
+ }
+ buf.WriteString(m.name)
+ writeSignature(buf, this, m.typ.(*Signature), visited)
+ }
+ } else {
+ // print explicit interface methods and embedded types
+ for i, m := range t.methods {
+ if i > 0 {
+ buf.WriteString("; ")
+ }
+ buf.WriteString(m.name)
+ writeSignature(buf, this, m.typ.(*Signature), visited)
+ }
+ for i, typ := range t.embeddeds {
+ if i > 0 || len(t.methods) > 0 {
+ buf.WriteString("; ")
+ }
+ writeType(buf, this, typ, visited)
+ }
+ }
+ buf.WriteByte('}')
+
+ case *Map:
+ buf.WriteString("map[")
+ writeType(buf, this, t.key, visited)
+ buf.WriteByte(']')
+ writeType(buf, this, t.elem, visited)
+
+ case *Chan:
+ var s string
+ var parens bool
+ switch t.dir {
+ case SendRecv:
+ s = "chan "
+ // chan (<-chan T) requires parentheses
+ if c, _ := t.elem.(*Chan); c != nil && c.dir == RecvOnly {
+ parens = true
+ }
+ case SendOnly:
+ s = "chan<- "
+ case RecvOnly:
+ s = "<-chan "
+ default:
+ panic("unreachable")
+ }
+ buf.WriteString(s)
+ if parens {
+ buf.WriteByte('(')
+ }
+ writeType(buf, this, t.elem, visited)
+ if parens {
+ buf.WriteByte(')')
+ }
+
+ case *Named:
+ s := ""
+ if obj := t.obj; obj != nil {
+ if pkg := obj.pkg; pkg != nil && pkg != this {
+ buf.WriteString(pkg.path)
+ buf.WriteByte('.')
+ }
+ // TODO(gri): function-local named types should be displayed
+ // differently from named types at package level to avoid
+ // ambiguity.
+ s = obj.name
+ }
+ buf.WriteString(s)
+
+ default:
+ // For externally defined implementations of Type.
+ buf.WriteString(t.String())
+ }
+}
+
+func writeTuple(buf *bytes.Buffer, this *Package, tup *Tuple, variadic bool, visited []Type) {
+ buf.WriteByte('(')
+ if tup != nil {
+ for i, v := range tup.vars {
+ if i > 0 {
+ buf.WriteString(", ")
+ }
+ if v.name != "" {
+ buf.WriteString(v.name)
+ buf.WriteByte(' ')
+ }
+ typ := v.typ
+ if variadic && i == len(tup.vars)-1 {
+ if s, ok := typ.(*Slice); ok {
+ buf.WriteString("...")
+ typ = s.elem
+ } else {
+ // special case:
+ // append(s, "foo"...) leads to signature func([]byte, string...)
+ if t, ok := typ.Underlying().(*Basic); !ok || t.kind != String {
+ panic("internal error: string type expected")
+ }
+ writeType(buf, this, typ, visited)
+ buf.WriteString("...")
+ continue
+ }
+ }
+ writeType(buf, this, typ, visited)
+ }
+ }
+ buf.WriteByte(')')
+}
+
+// WriteSignature writes the representation of the signature sig to buf,
+// without a leading "func" keyword.
+// Named types are printed package-qualified if they
+// do not belong to this package.
+func WriteSignature(buf *bytes.Buffer, this *Package, sig *Signature) {
+ writeSignature(buf, this, sig, make([]Type, 8))
+}
+
+func writeSignature(buf *bytes.Buffer, this *Package, sig *Signature, visited []Type) {
+ writeTuple(buf, this, sig.params, sig.variadic, visited)
+
+ n := sig.results.Len()
+ if n == 0 {
+ // no result
+ return
+ }
+
+ buf.WriteByte(' ')
+ if n == 1 && sig.results.vars[0].name == "" {
+ // single unnamed result
+ writeType(buf, this, sig.results.vars[0].typ, visited)
+ return
+ }
+
+ // multiple or named result(s)
+ writeTuple(buf, this, sig.results, false, visited)
+}
diff --git a/llgo/third_party/go.tools/go/types/typestring_test.go b/llgo/third_party/go.tools/go/types/typestring_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..508dbe6eaba4ce35f5e7229a29c17e2eb3495952
--- /dev/null
+++ b/llgo/third_party/go.tools/go/types/typestring_test.go
@@ -0,0 +1,158 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types_test
+
+import (
+ "go/ast"
+ "go/parser"
+ "go/token"
+ "testing"
+
+ _ "llvm.org/llgo/third_party/go.tools/go/gcimporter"
+ . "llvm.org/llgo/third_party/go.tools/go/types"
+)
+
+const filename = ""
+
+func makePkg(t *testing.T, src string) (*Package, error) {
+ fset := token.NewFileSet()
+ file, err := parser.ParseFile(fset, filename, src, parser.DeclarationErrors)
+ if err != nil {
+ return nil, err
+ }
+ // use the package name as package path
+ return Check(file.Name.Name, fset, []*ast.File{file})
+}
+
+type testEntry struct {
+ src, str string
+}
+
+// dup returns a testEntry where both src and str are the same.
+func dup(s string) testEntry {
+ return testEntry{s, s}
+}
+
+// types that don't depend on any other type declarations
+var independentTestTypes = []testEntry{
+ // basic types
+ dup("int"),
+ dup("float32"),
+ dup("string"),
+
+ // arrays
+ dup("[10]int"),
+
+ // slices
+ dup("[]int"),
+ dup("[][]int"),
+
+ // structs
+ dup("struct{}"),
+ dup("struct{x int}"),
+ {`struct {
+ x, y int
+ z float32 "foo"
+ }`, `struct{x int; y int; z float32 "foo"}`},
+ {`struct {
+ string
+ elems []complex128
+ }`, `struct{string; elems []complex128}`},
+
+ // pointers
+ dup("*int"),
+ dup("***struct{}"),
+ dup("*struct{a int; b float32}"),
+
+ // functions
+ dup("func()"),
+ dup("func(x int)"),
+ {"func(x, y int)", "func(x int, y int)"},
+ {"func(x, y int, z string)", "func(x int, y int, z string)"},
+ dup("func(int)"),
+ {"func(int, string, byte)", "func(int, string, byte)"},
+
+ dup("func() int"),
+ {"func() (string)", "func() string"},
+ dup("func() (u int)"),
+ {"func() (u, v int, w string)", "func() (u int, v int, w string)"},
+
+ dup("func(int) string"),
+ dup("func(x int) string"),
+ dup("func(x int) (u string)"),
+ {"func(x, y int) (u string)", "func(x int, y int) (u string)"},
+
+ dup("func(...int) string"),
+ dup("func(x ...int) string"),
+ dup("func(x ...int) (u string)"),
+ {"func(x, y ...int) (u string)", "func(x int, y ...int) (u string)"},
+
+ // interfaces
+ dup("interface{}"),
+ dup("interface{m()}"),
+ dup(`interface{String() string; m(int) float32}`),
+
+ // maps
+ dup("map[string]int"),
+ {"map[struct{x, y int}][]byte", "map[struct{x int; y int}][]byte"},
+
+ // channels
+ dup("chan<- chan int"),
+ dup("chan<- <-chan int"),
+ dup("<-chan <-chan int"),
+ dup("chan (<-chan int)"),
+ dup("chan<- func()"),
+ dup("<-chan []func() int"),
+}
+
+// types that depend on other type declarations (src in TestTypeString)
+var dependentTestTypes = []testEntry{
+ // interfaces
+ dup(`interface{io.Reader; io.Writer}`),
+ dup(`interface{m() int; io.Writer}`),
+ {`interface{m() interface{T}}`, `interface{m() interface{p.T}}`},
+}
+
+func TestTypeString(t *testing.T) {
+ var tests []testEntry
+ tests = append(tests, independentTestTypes...)
+ tests = append(tests, dependentTestTypes...)
+
+ for _, test := range tests {
+ src := `package p; import "io"; type _ io.Writer; type T ` + test.src
+ pkg, err := makePkg(t, src)
+ if err != nil {
+ t.Errorf("%s: %s", src, err)
+ continue
+ }
+ typ := pkg.Scope().Lookup("T").Type().Underlying()
+ if got := typ.String(); got != test.str {
+ t.Errorf("%s: got %s, want %s", test.src, got, test.str)
+ }
+ }
+}
+
+func TestQualifiedTypeString(t *testing.T) {
+ p, _ := pkgFor("p.go", "package p; type T int", nil)
+ q, _ := pkgFor("q.go", "package q", nil)
+
+ pT := p.Scope().Lookup("T").Type()
+ for _, test := range []struct {
+ typ Type
+ this *Package
+ want string
+ }{
+ {pT, nil, "p.T"},
+ {pT, p, "T"},
+ {pT, q, "p.T"},
+ {NewPointer(pT), p, "*T"},
+ {NewPointer(pT), q, "*p.T"},
+ } {
+ if got := TypeString(test.this, test.typ); got != test.want {
+ t.Errorf("TypeString(%s, %s) = %s, want %s",
+ test.this, test.typ, got, test.want)
+ }
+ }
+}
diff --git a/llgo/third_party/go.tools/go/types/typeutil/example_test.go b/llgo/third_party/go.tools/go/types/typeutil/example_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..1eddedb71d620a3020f22d84976e81eac0729083
--- /dev/null
+++ b/llgo/third_party/go.tools/go/types/typeutil/example_test.go
@@ -0,0 +1,64 @@
+package typeutil_test
+
+import (
+ "fmt"
+ "sort"
+
+ "go/ast"
+ "go/parser"
+ "go/token"
+
+ "llvm.org/llgo/third_party/go.tools/go/types"
+ "llvm.org/llgo/third_party/go.tools/go/types/typeutil"
+)
+
+func ExampleMap() {
+ const source = `package P
+
+var X []string
+var Y []string
+
+const p, q = 1.0, 2.0
+
+func f(offset int32) (value byte, ok bool)
+func g(rune) (uint8, bool)
+`
+
+ // Parse and type-check the package.
+ fset := token.NewFileSet()
+ f, err := parser.ParseFile(fset, "P.go", source, 0)
+ if err != nil {
+ panic(err)
+ }
+ pkg, err := new(types.Config).Check("P", fset, []*ast.File{f}, nil)
+ if err != nil {
+ panic(err)
+ }
+
+ scope := pkg.Scope()
+
+ // Group names of package-level objects by their type.
+ var namesByType typeutil.Map // value is []string
+ for _, name := range scope.Names() {
+ T := scope.Lookup(name).Type()
+
+ names, _ := namesByType.At(T).([]string)
+ names = append(names, name)
+ namesByType.Set(T, names)
+ }
+
+ // Format, sort, and print the map entries.
+ var lines []string
+ namesByType.Iterate(func(T types.Type, names interface{}) {
+ lines = append(lines, fmt.Sprintf("%s %s", names, T))
+ })
+ sort.Strings(lines)
+ for _, line := range lines {
+ fmt.Println(line)
+ }
+
+ // Output:
+ // [X Y] []string
+ // [f g] func(offset int32) (value byte, ok bool)
+ // [p q] untyped float
+}
diff --git a/llgo/third_party/go.tools/go/types/typeutil/imports.go b/llgo/third_party/go.tools/go/types/typeutil/imports.go
new file mode 100644
index 0000000000000000000000000000000000000000..2609f7f7ab18f059284d5bc14073a7dfbe372de2
--- /dev/null
+++ b/llgo/third_party/go.tools/go/types/typeutil/imports.go
@@ -0,0 +1,27 @@
+package typeutil
+
+import "llvm.org/llgo/third_party/go.tools/go/types"
+
+// Dependencies returns all dependencies of the specified packages.
+//
+// Dependent packages appear in topological order: if package P imports
+// package Q, Q appears earlier than P in the result.
+// The algorithm follows import statements in the order they
+// appear in the source code, so the result is a total order.
+//
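+// For example, if package B imports package A, Dependencies(B)
+// returns the packages in the order A, B (see TestDependencies).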
+func Dependencies(pkgs ...*types.Package) []*types.Package {
+ var result []*types.Package
+ seen := make(map[*types.Package]bool)
+ var visit func(pkgs []*types.Package)
+ visit = func(pkgs []*types.Package) {
+ for _, p := range pkgs {
+ if !seen[p] {
+ seen[p] = true
+ visit(p.Imports())
+ result = append(result, p)
+ }
+ }
+ }
+ visit(pkgs)
+ return result
+}
diff --git a/llgo/third_party/go.tools/go/types/typeutil/imports_test.go b/llgo/third_party/go.tools/go/types/typeutil/imports_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..b0375ca33111d552f97dc1be98d65d4d1c9034db
--- /dev/null
+++ b/llgo/third_party/go.tools/go/types/typeutil/imports_test.go
@@ -0,0 +1,75 @@
+package typeutil_test
+
+import (
+ "fmt"
+ "go/ast"
+ "go/parser"
+ "go/token"
+ "testing"
+
+ "llvm.org/llgo/third_party/go.tools/go/types"
+ "llvm.org/llgo/third_party/go.tools/go/types/typeutil"
+)
+
+func TestDependencies(t *testing.T) {
+ packages := make(map[string]*types.Package)
+ conf := types.Config{
+ Packages: packages,
+ Import: func(_ map[string]*types.Package, path string) (*types.Package, error) {
+ return packages[path], nil
+ },
+ }
+ fset := token.NewFileSet()
+
+ // All edges go to the right.
+ // /--D--B--A
+ // F \_C_/
+ // \__E_/
+ for i, content := range []string{
+ `package A`,
+ `package C; import (_ "A")`,
+ `package B; import (_ "A")`,
+ `package E; import (_ "C")`,
+ `package D; import (_ "B"; _ "C")`,
+ `package F; import (_ "D"; _ "E")`,
+ } {
+ f, err := parser.ParseFile(fset, fmt.Sprintf("%d.go", i), content, 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+ pkg, err := conf.Check(f.Name.Name, fset, []*ast.File{f}, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ packages[pkg.Path()] = pkg
+ }
+
+ for _, test := range []struct {
+ roots, want string
+ }{
+ {"A", "A"},
+ {"B", "AB"},
+ {"C", "AC"},
+ {"D", "ABCD"},
+ {"E", "ACE"},
+ {"F", "ABCDEF"},
+
+ {"BE", "ABCE"},
+ {"EB", "ACEB"},
+ {"DE", "ABCDE"},
+ {"ED", "ACEBD"},
+ {"EF", "ACEBDF"},
+ } {
+ var pkgs []*types.Package
+ for _, r := range test.roots {
+ pkgs = append(pkgs, conf.Packages[string(r)])
+ }
+ var got string
+ for _, p := range typeutil.Dependencies(pkgs...) {
+ got += p.Path()
+ }
+ if got != test.want {
+ t.Errorf("Dependencies(%q) = %q, want %q", test.roots, got, test.want)
+ }
+ }
+}
diff --git a/llgo/third_party/go.tools/go/types/typeutil/map.go b/llgo/third_party/go.tools/go/types/typeutil/map.go
new file mode 100644
index 0000000000000000000000000000000000000000..d14483660d1fa1b7545886ab6c11615b172dc106
--- /dev/null
+++ b/llgo/third_party/go.tools/go/types/typeutil/map.go
@@ -0,0 +1,314 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package typeutil defines various utilities for types, such as Map,
+// a mapping from types.Type to interface{} values.
+package typeutil
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+
+ "llvm.org/llgo/third_party/go.tools/go/types"
+)
+
+// Map is a hash-table-based mapping from types (types.Type) to
+// arbitrary interface{} values. The concrete types that implement
+// the Type interface are pointers. Since they are not canonicalized,
+// == cannot be used to check for equivalence, and thus we cannot
+// simply use a Go map.
+//
+// Just as with map[K]V, a nil *Map is a valid empty map.
+//
+// Not thread-safe.
+//
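+// A minimal usage sketch (ExampleMap in example_test.go shows a
+// complete example):
+//
+//	var m typeutil.Map
+//	m.Set(key, "some value") // key is a types.Type
+//	v := m.At(key)           // "some value"; nil if key is absent
+//	m.Delete(key)
+//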
+type Map struct {
+ hasher Hasher // shared by many Maps
+ table map[uint32][]entry // maps hash to bucket; entry.key==nil means unused
+ length int // number of map entries
+}
+
+// entry is an entry (key/value association) in a hash bucket.
+type entry struct {
+ key types.Type
+ value interface{}
+}
+
+// SetHasher sets the hasher used by Map.
+//
+// All Hashers are functionally equivalent but contain internal state
+// used to cache the results of hashing previously seen types.
+//
+// A single Hasher created by MakeHasher() may be shared among many
+// Maps. This is recommended if the instances have many keys in
+// common, as it will amortize the cost of hash computation.
+//
+// A Hasher may grow without bound as new types are seen. Even when a
+// type is deleted from the map, the Hasher never shrinks, since other
+// types in the map may reference the deleted type indirectly.
+//
+// Hashers are not thread-safe, and read-only operations such as
+// Map.At require updates to the hasher, so a full Mutex lock (not a
+// read-lock) is required around all Map operations if a shared
+// hasher is accessed from multiple threads.
+//
+// If SetHasher is not called, the Map will create a private hasher at
+// the first call to Set.
+//
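+// For example, to share a single hasher between two maps:
+//
+//	h := typeutil.MakeHasher()
+//	var m1, m2 typeutil.Map
+//	m1.SetHasher(h)
+//	m2.SetHasher(h)
+//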
+func (m *Map) SetHasher(hasher Hasher) {
+ m.hasher = hasher
+}
+
+// Delete removes the entry with the given key, if any.
+// It returns true if the entry was found.
+//
+func (m *Map) Delete(key types.Type) bool {
+ if m != nil && m.table != nil {
+ hash := m.hasher.Hash(key)
+ bucket := m.table[hash]
+ for i, e := range bucket {
+ if e.key != nil && types.Identical(key, e.key) {
+ // We can't compact the bucket as it
+ // would disturb iterators.
+ bucket[i] = entry{}
+ m.length--
+ return true
+ }
+ }
+ }
+ return false
+}
+
+// At returns the map entry for the given key.
+// The result is nil if the entry is not present.
+//
+func (m *Map) At(key types.Type) interface{} {
+ if m != nil && m.table != nil {
+ for _, e := range m.table[m.hasher.Hash(key)] {
+ if e.key != nil && types.Identical(key, e.key) {
+ return e.value
+ }
+ }
+ }
+ return nil
+}
+
+// Set sets the map entry for key to value,
+// and returns the previous value, if any.
+func (m *Map) Set(key types.Type, value interface{}) (prev interface{}) {
+ if m.table != nil {
+ hash := m.hasher.Hash(key)
+ bucket := m.table[hash]
+ var hole *entry
+ for i, e := range bucket {
+ if e.key == nil {
+ hole = &bucket[i]
+ } else if types.Identical(key, e.key) {
+ prev = e.value
+ bucket[i].value = value
+ return
+ }
+ }
+
+ if hole != nil {
+ *hole = entry{key, value} // overwrite deleted entry
+ } else {
+ m.table[hash] = append(bucket, entry{key, value})
+ }
+ } else {
+ if m.hasher.memo == nil {
+ m.hasher = MakeHasher()
+ }
+ hash := m.hasher.Hash(key)
+ m.table = map[uint32][]entry{hash: {entry{key, value}}}
+ }
+
+ m.length++
+ return
+}
+
+// Len returns the number of map entries.
+func (m *Map) Len() int {
+ if m != nil {
+ return m.length
+ }
+ return 0
+}
+
+// Iterate calls function f on each entry in the map in unspecified order.
+//
+// If f should mutate the map, Iterate provides the same guarantees as
+// Go maps: if f deletes a map entry that Iterate has not yet reached,
+// f will not be invoked for it, but if f inserts a map entry that
+// Iterate has not yet reached, whether or not f will be invoked for
+// it is unspecified.
+//
+func (m *Map) Iterate(f func(key types.Type, value interface{})) {
+ if m != nil {
+ for _, bucket := range m.table {
+ for _, e := range bucket {
+ if e.key != nil {
+ f(e.key, e.value)
+ }
+ }
+ }
+ }
+}
+
+// Keys returns a new slice containing the set of map keys.
+// The order is unspecified.
+func (m *Map) Keys() []types.Type {
+ keys := make([]types.Type, 0, m.Len())
+ m.Iterate(func(key types.Type, _ interface{}) {
+ keys = append(keys, key)
+ })
+ return keys
+}
+
+func (m *Map) toString(values bool) string {
+ if m == nil {
+ return "{}"
+ }
+ var buf bytes.Buffer
+ fmt.Fprint(&buf, "{")
+ sep := ""
+ m.Iterate(func(key types.Type, value interface{}) {
+ fmt.Fprint(&buf, sep)
+ sep = ", "
+ fmt.Fprint(&buf, key)
+ if values {
+ fmt.Fprintf(&buf, ": %q", value)
+ }
+ })
+ fmt.Fprint(&buf, "}")
+ return buf.String()
+}
+
+// String returns a string representation of the map's entries.
+// Values are printed using fmt.Sprintf("%v", v).
+// Order is unspecified.
+//
+func (m *Map) String() string {
+ return m.toString(true)
+}
+
+// KeysString returns a string representation of the map's key set.
+// Order is unspecified.
+//
+func (m *Map) KeysString() string {
+ return m.toString(false)
+}
+
+////////////////////////////////////////////////////////////////////////
+// Hasher
+
+// A Hasher maps each type to its hash value.
+// For efficiency, a hasher uses memoization; thus its memory
+// footprint grows monotonically over time.
+// Hashers are not thread-safe.
+// Hashers have reference semantics.
+// Call MakeHasher to create a Hasher.
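+//
+// Because a Hasher has reference semantics, copies share one memo:
+//
+//	h1 := MakeHasher()
+//	h2 := h1 // h1 and h2 share (and update) the same memo table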
+type Hasher struct {
+ memo map[types.Type]uint32
+}
+
+// MakeHasher returns a new Hasher instance.
+func MakeHasher() Hasher {
+ return Hasher{make(map[types.Type]uint32)}
+}
+
+// Hash computes a hash value for the given type t such that
+// Identical(t, t') => Hash(t) == Hash(t').
+func (h Hasher) Hash(t types.Type) uint32 {
+ hash, ok := h.memo[t]
+ if !ok {
+ hash = h.hashFor(t)
+ h.memo[t] = hash
+ }
+ return hash
+}
+
+// hashString computes the Fowler–Noll–Vo hash of s.
+func hashString(s string) uint32 {
+ var h uint32
+ for i := 0; i < len(s); i++ {
+ h ^= uint32(s[i])
+ h *= 16777619
+ }
+ return h
+}
+
+// hashFor computes the hash of t.
+func (h Hasher) hashFor(t types.Type) uint32 {
+ // See Identical for rationale.
+ switch t := t.(type) {
+ case *types.Basic:
+ return uint32(t.Kind())
+
+ case *types.Array:
+ return 9043 + 2*uint32(t.Len()) + 3*h.Hash(t.Elem())
+
+ case *types.Slice:
+ return 9049 + 2*h.Hash(t.Elem())
+
+ case *types.Struct:
+ var hash uint32 = 9059
+ for i, n := 0, t.NumFields(); i < n; i++ {
+ f := t.Field(i)
+ if f.Anonymous() {
+ hash += 8861
+ }
+ hash += hashString(t.Tag(i))
+ hash += hashString(f.Name()) // (ignore f.Pkg)
+ hash += h.Hash(f.Type())
+ }
+ return hash
+
+ case *types.Pointer:
+ return 9067 + 2*h.Hash(t.Elem())
+
+ case *types.Signature:
+ var hash uint32 = 9091
+ if t.Variadic() {
+ hash *= 8863
+ }
+ return hash + 3*h.hashTuple(t.Params()) + 5*h.hashTuple(t.Results())
+
+ case *types.Interface:
+ var hash uint32 = 9103
+ for i, n := 0, t.NumMethods(); i < n; i++ {
+ // See go/types.identicalMethods for rationale.
+ // Method order is not significant.
+ // Ignore m.Pkg().
+ m := t.Method(i)
+ hash += 3*hashString(m.Name()) + 5*h.Hash(m.Type())
+ }
+ return hash
+
+ case *types.Map:
+ return 9109 + 2*h.Hash(t.Key()) + 3*h.Hash(t.Elem())
+
+ case *types.Chan:
+ return 9127 + 2*uint32(t.Dir()) + 3*h.Hash(t.Elem())
+
+ case *types.Named:
+ // Not safe with a copying GC; objects may move.
+ return uint32(reflect.ValueOf(t.Obj()).Pointer())
+
+ case *types.Tuple:
+ return h.hashTuple(t)
+ }
+ panic(t)
+}
+
+func (h Hasher) hashTuple(tuple *types.Tuple) uint32 {
+ // See go/types.identicalTypes for rationale.
+ n := tuple.Len()
+ var hash uint32 = 9137 + 2*uint32(n)
+ for i := 0; i < n; i++ {
+ hash += 3 * h.Hash(tuple.At(i).Type())
+ }
+ return hash
+}
diff --git a/llgo/third_party/go.tools/go/types/typeutil/map_test.go b/llgo/third_party/go.tools/go/types/typeutil/map_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..cd200f5d11087c98ccd8bfa23400477a4a1051d5
--- /dev/null
+++ b/llgo/third_party/go.tools/go/types/typeutil/map_test.go
@@ -0,0 +1,174 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typeutil_test
+
+// TODO(adonovan):
+// - test use of explicit hasher across two maps.
+// - test hashcodes are consistent with equals for a range of types
+// (e.g. all types generated by type-checking some body of real code).
+
+import (
+ "testing"
+
+ "llvm.org/llgo/third_party/go.tools/go/types"
+ "llvm.org/llgo/third_party/go.tools/go/types/typeutil"
+)
+
+var (
+ tStr = types.Typ[types.String] // string
+ tPStr1 = types.NewPointer(tStr) // *string
+ tPStr2 = types.NewPointer(tStr) // *string, again
+ tInt = types.Typ[types.Int] // int
+ tChanInt1 = types.NewChan(types.RecvOnly, tInt) // <-chan int
+ tChanInt2 = types.NewChan(types.RecvOnly, tInt) // <-chan int, again
+)
+
+func checkEqualButNotIdentical(t *testing.T, x, y types.Type, comment string) {
+ if !types.Identical(x, y) {
+ t.Errorf("%s: not equal: %s, %s", comment, x, y)
+ }
+ if x == y {
+ t.Errorf("%s: identical: %v, %v", comment, x, y)
+ }
+}
+
+func TestAxioms(t *testing.T) {
+ checkEqualButNotIdentical(t, tPStr1, tPStr2, "tPstr{1,2}")
+ checkEqualButNotIdentical(t, tChanInt1, tChanInt2, "tChanInt{1,2}")
+}
+
+func TestMap(t *testing.T) {
+ var tmap *typeutil.Map
+
+ // All methods but Set are safe to call on a nil *Map.
+ tmap.Len()
+ tmap.At(tPStr1)
+ tmap.Delete(tPStr1)
+ tmap.KeysString()
+ tmap.String()
+
+ tmap = new(typeutil.Map)
+
+ // Length of empty map.
+ if l := tmap.Len(); l != 0 {
+ t.Errorf("Len() on empty Map: got %d, want 0", l)
+ }
+ // At of missing key.
+ if v := tmap.At(tPStr1); v != nil {
+ t.Errorf("At() on empty Map: got %v, want nil", v)
+ }
+ // Deletion of missing key.
+ if tmap.Delete(tPStr1) {
+ t.Errorf("Delete() on empty Map: got true, want false")
+ }
+ // Set of new key.
+ if prev := tmap.Set(tPStr1, "*string"); prev != nil {
+ t.Errorf("Set() on empty Map returned non-nil previous value %s", prev)
+ }
+
+ // Now: {*string: "*string"}
+
+ // Length of non-empty map.
+ if l := tmap.Len(); l != 1 {
+ t.Errorf("Len(): got %d, want 1", l)
+ }
+ // At via insertion key.
+ if v := tmap.At(tPStr1); v != "*string" {
+ t.Errorf("At(): got %q, want \"*string\"", v)
+ }
+ // At via equal key.
+ if v := tmap.At(tPStr2); v != "*string" {
+ t.Errorf("At(): got %q, want \"*string\"", v)
+ }
+ // Iteration over sole entry.
+ tmap.Iterate(func(key types.Type, value interface{}) {
+ if key != tPStr1 {
+ t.Errorf("Iterate: key: got %s, want %s", key, tPStr1)
+ }
+ if want := "*string"; value != want {
+ t.Errorf("Iterate: value: got %s, want %s", value, want)
+ }
+ })
+
+ // Insertion with key equal to present one.
+ if prev := tmap.Set(tPStr2, "*string again"); prev != "*string" {
+ t.Errorf("Set() previous value: got %s, want \"*string\"", prev)
+ }
+
+ // Insertion of another association.
+ if prev := tmap.Set(tChanInt1, "<-chan int"); prev != nil {
+ t.Errorf("Set() previous value: got %s, want nil", prev)
+ }
+
+ // Now: {*string: "*string again", <-chan int: "<-chan int"}
+
+ want1 := "{*string: \"*string again\", <-chan int: \"<-chan int\"}"
+ want2 := "{<-chan int: \"<-chan int\", *string: \"*string again\"}"
+ if s := tmap.String(); s != want1 && s != want2 {
+ t.Errorf("String(): got %s, want %s", s, want1)
+ }
+
+ want1 = "{*string, <-chan int}"
+ want2 = "{<-chan int, *string}"
+ if s := tmap.KeysString(); s != want1 && s != want2 {
+ t.Errorf("KeysString(): got %s, want %s", s, want1)
+ }
+
+ // Keys().
+ I := types.Identical
+ switch k := tmap.Keys(); {
+ case I(k[0], tChanInt1) && I(k[1], tPStr1): // ok
+ case I(k[1], tChanInt1) && I(k[0], tPStr1): // ok
+ default:
+ t.Errorf("Keys(): got %v, want %s", k, want2)
+ }
+
+ if l := tmap.Len(); l != 2 {
+ t.Errorf("Len(): got %d, want 1", l)
+ }
+ // At via original key.
+ if v := tmap.At(tPStr1); v != "*string again" {
+ t.Errorf("At(): got %q, want \"*string again\"", v)
+ }
+ hamming := 1
+ tmap.Iterate(func(key types.Type, value interface{}) {
+ switch {
+ case I(key, tChanInt1):
+ hamming *= 2 // ok
+ case I(key, tPStr1):
+ hamming *= 3 // ok
+ }
+ })
+ if hamming != 6 {
+ t.Errorf("Iterate: hamming: got %d, want %d", hamming, 6)
+ }
+
+ if v := tmap.At(tChanInt2); v != "<-chan int" {
+ t.Errorf("At(): got %q, want \"<-chan int\"", v)
+ }
+ // Deletion with key equal to present one.
+ if !tmap.Delete(tChanInt2) {
+ t.Errorf("Delete() of existing key: got false, want true")
+ }
+
+ // Now: {*string: "*string again"}
+
+ if l := tmap.Len(); l != 1 {
+ t.Errorf("Len(): got %d, want 1", l)
+ }
+ // Deletion again.
+ if !tmap.Delete(tPStr2) {
+ t.Errorf("Delete() of existing key: got false, want true")
+ }
+
+ // Now: {}
+
+ if l := tmap.Len(); l != 0 {
+ t.Errorf("Len(): got %d, want %d", l, 0)
+ }
+ if s := tmap.String(); s != "{}" {
+ t.Errorf("Len(): got %q, want %q", s, "")
+ }
+}
diff --git a/llgo/third_party/go.tools/go/types/typeutil/ui.go b/llgo/third_party/go.tools/go/types/typeutil/ui.go
new file mode 100644
index 0000000000000000000000000000000000000000..0eb20fa60ecd53b40da396a1ad7905989e837d7e
--- /dev/null
+++ b/llgo/third_party/go.tools/go/types/typeutil/ui.go
@@ -0,0 +1,38 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typeutil
+
+// This file defines utilities for user interfaces that display types.
+
+import "llvm.org/llgo/third_party/go.tools/go/types"
+
+// IntuitiveMethodSet returns the intuitive method set of a type, T.
+//
+// The result contains MethodSet(T) and additionally, if T is a
+// concrete type, methods belonging to *T if there is no identically
+// named method on T itself. This corresponds to user intuition about
+// method sets; this function is intended only for user interfaces.
+//
+// The order of the result is as for types.MethodSet(T).
+//
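+// For example, given
+//
+//	type T struct{}
+//	func (T) f()  {}
+//	func (*T) g() {}
+//
+// the intuitive method set of T contains both f and g, even though
+// the method set of T itself contains only f.
+//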
+func IntuitiveMethodSet(T types.Type, msets *types.MethodSetCache) []*types.Selection {
+ var result []*types.Selection
+ mset := msets.MethodSet(T)
+ if _, ok := T.Underlying().(*types.Interface); ok {
+ for i, n := 0, mset.Len(); i < n; i++ {
+ result = append(result, mset.At(i))
+ }
+ } else {
+ pmset := msets.MethodSet(types.NewPointer(T))
+ for i, n := 0, pmset.Len(); i < n; i++ {
+ meth := pmset.At(i)
+ if m := mset.Lookup(meth.Obj().Pkg(), meth.Obj().Name()); m != nil {
+ meth = m
+ }
+ result = append(result, meth)
+ }
+ }
+ return result
+}
diff --git a/llgo/third_party/go.tools/go/types/typexpr.go b/llgo/third_party/go.tools/go/types/typexpr.go
new file mode 100644
index 0000000000000000000000000000000000000000..2470aa7cb462769224e6c928ca0591ecbe4bd71a
--- /dev/null
+++ b/llgo/third_party/go.tools/go/types/typexpr.go
@@ -0,0 +1,721 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements type-checking of identifiers and type expressions.
+
+package types
+
+import (
+ "go/ast"
+ "go/token"
+ "sort"
+ "strconv"
+
+ "llvm.org/llgo/third_party/go.tools/go/exact"
+)
+
+// ident type-checks identifier e and initializes x with the value or type of e.
+// If an error occurred, x.mode is set to invalid.
+// For the meaning of def and path, see check.typ, below.
+//
+func (check *Checker) ident(x *operand, e *ast.Ident, def *Named, path []*TypeName) {
+ x.mode = invalid
+ x.expr = e
+
+ scope, obj := check.scope.LookupParent(e.Name)
+ if obj == nil {
+ if e.Name == "_" {
+ check.errorf(e.Pos(), "cannot use _ as value or type")
+ } else {
+ check.errorf(e.Pos(), "undeclared name: %s", e.Name)
+ }
+ return
+ }
+ check.recordUse(e, obj)
+
+ check.objDecl(obj, def, path)
+ typ := obj.Type()
+ assert(typ != nil)
+
+ // The object may be dot-imported: If so, remove its package from
+ // the map of unused dot imports for the respective file scope.
+ // (This code is only needed for dot-imports. Without them,
+ // we only have to mark variables, see *Var case below).
+ if pkg := obj.Pkg(); pkg != check.pkg && pkg != nil {
+ delete(check.unusedDotImports[scope], pkg)
+ }
+
+ switch obj := obj.(type) {
+ case *PkgName:
+ check.errorf(e.Pos(), "use of package %s not in selector", obj.name)
+ return
+
+ case *Const:
+ check.addDeclDep(obj)
+ if typ == Typ[Invalid] {
+ return
+ }
+ if obj == universeIota {
+ if check.iota == nil {
+ check.errorf(e.Pos(), "cannot use iota outside constant declaration")
+ return
+ }
+ x.val = check.iota
+ } else {
+ x.val = obj.val
+ }
+ assert(x.val != nil)
+ x.mode = constant
+
+ case *TypeName:
+ x.mode = typexpr
+ // check for cycle
+ // (it's ok to iterate forward because each named type appears at most once in path)
+ for i, prev := range path {
+ if prev == obj {
+ check.errorf(obj.pos, "illegal cycle in declaration of %s", obj.name)
+ // print cycle
+ for _, obj := range path[i:] {
+ check.errorf(obj.Pos(), "\t%s refers to", obj.Name()) // secondary error, \t indented
+ }
+ check.errorf(obj.Pos(), "\t%s", obj.Name())
+ // maintain x.mode == typexpr despite error
+ typ = Typ[Invalid]
+ break
+ }
+ }
+
+ case *Var:
+ if obj.pkg == check.pkg {
+ obj.used = true
+ }
+ check.addDeclDep(obj)
+ if typ == Typ[Invalid] {
+ return
+ }
+ x.mode = variable
+
+ case *Func:
+ check.addDeclDep(obj)
+ x.mode = value
+
+ case *Builtin:
+ x.id = obj.id
+ x.mode = builtin
+
+ case *Nil:
+ x.mode = value
+
+ default:
+ unreachable()
+ }
+
+ x.typ = typ
+}
+
+// typExpr type-checks the type expression e and returns its type, or Typ[Invalid].
+// If def != nil, e is the type specification for the named type def, declared
+// in a type declaration, and def.underlying will be set to the type of e before
+// any components of e are type-checked. Path contains the path of named types
+// referring to this type.
+//
+func (check *Checker) typExpr(e ast.Expr, def *Named, path []*TypeName) (T Type) {
+ if trace {
+ check.trace(e.Pos(), "%s", e)
+ check.indent++
+ defer func() {
+ check.indent--
+ check.trace(e.Pos(), "=> %s", T)
+ }()
+ }
+
+ T = check.typExprInternal(e, def, path)
+ assert(isTyped(T))
+ check.recordTypeAndValue(e, typexpr, T, nil)
+
+ return
+}
+
+func (check *Checker) typ(e ast.Expr) Type {
+ return check.typExpr(e, nil, nil)
+}
+
+// funcType type-checks a function or method type and returns its signature.
+func (check *Checker) funcType(sig *Signature, recvPar *ast.FieldList, ftyp *ast.FuncType) *Signature {
+ scope := NewScope(check.scope, "function")
+ check.recordScope(ftyp, scope)
+
+ recvList, _ := check.collectParams(scope, recvPar, false)
+ params, variadic := check.collectParams(scope, ftyp.Params, true)
+ results, _ := check.collectParams(scope, ftyp.Results, false)
+
+ if recvPar != nil {
+ // recv parameter list present (may be empty)
+ // spec: "The receiver is specified via an extra parameter section preceeding the
+ // method name. That parameter section must declare a single parameter, the receiver."
+ var recv *Var
+ switch len(recvList) {
+ case 0:
+ check.error(recvPar.Pos(), "method is missing receiver")
+ recv = NewParam(0, nil, "", Typ[Invalid]) // ignore recv below
+ default:
+ // more than one receiver
+ check.error(recvList[len(recvList)-1].Pos(), "method must have exactly one receiver")
+ fallthrough // continue with first receiver
+ case 1:
+ recv = recvList[0]
+ }
+ // spec: "The receiver type must be of the form T or *T where T is a type name."
+ // (ignore invalid types - error was reported before)
+ if t, _ := deref(recv.typ); t != Typ[Invalid] {
+ var err string
+ if T, _ := t.(*Named); T != nil {
+ // spec: "The type denoted by T is called the receiver base type; it must not
+ // be a pointer or interface type and it must be declared in the same package
+ // as the method."
+ if T.obj.pkg != check.pkg {
+ err = "type not defined in this package"
+ } else {
+ // TODO(gri) This is not correct if the underlying type is unknown yet.
+ switch u := T.underlying.(type) {
+ case *Basic:
+ // unsafe.Pointer is treated like a regular pointer
+ if u.kind == UnsafePointer {
+ err = "unsafe.Pointer"
+ }
+ case *Pointer, *Interface:
+ err = "pointer or interface type"
+ }
+ }
+ } else {
+ err = "basic or unnamed type"
+ }
+ if err != "" {
+ check.errorf(recv.pos, "invalid receiver %s (%s)", recv.typ, err)
+ // ok to continue
+ }
+ }
+ sig.recv = recv
+ }
+
+ sig.scope = scope
+ sig.params = NewTuple(params...)
+ sig.results = NewTuple(results...)
+ sig.variadic = variadic
+
+ return sig
+}
+
+// typExprInternal drives type checking of types.
+// Must only be called by typExpr.
+//
+func (check *Checker) typExprInternal(e ast.Expr, def *Named, path []*TypeName) Type {
+ switch e := e.(type) {
+ case *ast.BadExpr:
+ // ignore - error reported before
+
+ case *ast.Ident:
+ var x operand
+ check.ident(&x, e, def, path)
+
+ switch x.mode {
+ case typexpr:
+ typ := x.typ
+ def.setUnderlying(typ)
+ return typ
+ case invalid:
+ // ignore - error reported before
+ case novalue:
+ check.errorf(x.pos(), "%s used as type", &x)
+ default:
+ check.errorf(x.pos(), "%s is not a type", &x)
+ }
+
+ case *ast.SelectorExpr:
+ var x operand
+ check.selector(&x, e)
+
+ switch x.mode {
+ case typexpr:
+ typ := x.typ
+ def.setUnderlying(typ)
+ return typ
+ case invalid:
+ // ignore - error reported before
+ case novalue:
+ check.errorf(x.pos(), "%s used as type", &x)
+ default:
+ check.errorf(x.pos(), "%s is not a type", &x)
+ }
+
+ case *ast.ParenExpr:
+ return check.typExpr(e.X, def, path)
+
+ case *ast.ArrayType:
+ if e.Len != nil {
+ typ := new(Array)
+ def.setUnderlying(typ)
+ typ.len = check.arrayLength(e.Len)
+ typ.elem = check.typExpr(e.Elt, nil, path)
+ return typ
+
+ } else {
+ typ := new(Slice)
+ def.setUnderlying(typ)
+ typ.elem = check.typ(e.Elt)
+ return typ
+ }
+
+ case *ast.StructType:
+ typ := new(Struct)
+ def.setUnderlying(typ)
+ check.structType(typ, e, path)
+ return typ
+
+ case *ast.StarExpr:
+ typ := new(Pointer)
+ def.setUnderlying(typ)
+ typ.base = check.typ(e.X)
+ return typ
+
+ case *ast.FuncType:
+ typ := new(Signature)
+ def.setUnderlying(typ)
+ check.funcType(typ, nil, e)
+ return typ
+
+ case *ast.InterfaceType:
+ typ := new(Interface)
+ def.setUnderlying(typ)
+ check.interfaceType(typ, e, def, path)
+ return typ
+
+ case *ast.MapType:
+ typ := new(Map)
+ def.setUnderlying(typ)
+
+ typ.key = check.typ(e.Key)
+ typ.elem = check.typ(e.Value)
+
+ // spec: "The comparison operators == and != must be fully defined
+ // for operands of the key type; thus the key type must not be a
+ // function, map, or slice."
+ //
+ // Delay this check because it requires fully setup types;
+ // it is safe to continue in any case (was issue 6667).
+ check.delay(func() {
+ if !Comparable(typ.key) {
+ check.errorf(e.Key.Pos(), "invalid map key type %s", typ.key)
+ }
+ })
+
+ return typ
+
+ case *ast.ChanType:
+ typ := new(Chan)
+ def.setUnderlying(typ)
+
+ dir := SendRecv
+ switch e.Dir {
+ case ast.SEND | ast.RECV:
+ // nothing to do
+ case ast.SEND:
+ dir = SendOnly
+ case ast.RECV:
+ dir = RecvOnly
+ default:
+ check.invalidAST(e.Pos(), "unknown channel direction %d", e.Dir)
+ // ok to continue
+ }
+
+ typ.dir = dir
+ typ.elem = check.typ(e.Value)
+ return typ
+
+ default:
+ check.errorf(e.Pos(), "%s is not a type", e)
+ }
+
+ typ := Typ[Invalid]
+ def.setUnderlying(typ)
+ return typ
+}
+
+// typOrNil type-checks the type expression (or nil value) e
+// and returns the type of e, or nil.
+// If e is neither a type nor nil, typOrNil returns Typ[Invalid].
+//
+func (check *Checker) typOrNil(e ast.Expr) Type {
+ var x operand
+ check.rawExpr(&x, e, nil)
+ switch x.mode {
+ case invalid:
+ // ignore - error reported before
+ case novalue:
+ check.errorf(x.pos(), "%s used as type", &x)
+ case typexpr:
+ return x.typ
+ case value:
+ if x.isNil() {
+ return nil
+ }
+ fallthrough
+ default:
+ check.errorf(x.pos(), "%s is not a type", &x)
+ }
+ return Typ[Invalid]
+}
+
+func (check *Checker) arrayLength(e ast.Expr) int64 {
+ var x operand
+ check.expr(&x, e)
+ if x.mode != constant {
+ if x.mode != invalid {
+ check.errorf(x.pos(), "array length %s must be constant", &x)
+ }
+ return 0
+ }
+ if !x.isInteger() {
+ check.errorf(x.pos(), "array length %s must be integer", &x)
+ return 0
+ }
+ n, ok := exact.Int64Val(x.val)
+ if !ok || n < 0 {
+ check.errorf(x.pos(), "invalid array length %s", &x)
+ return 0
+ }
+ return n
+}
+
+func (check *Checker) collectParams(scope *Scope, list *ast.FieldList, variadicOk bool) (params []*Var, variadic bool) {
+ if list == nil {
+ return
+ }
+
+ var named, anonymous bool
+ for i, field := range list.List {
+ ftype := field.Type
+ if t, _ := ftype.(*ast.Ellipsis); t != nil {
+ ftype = t.Elt
+ if variadicOk && i == len(list.List)-1 {
+ variadic = true
+ } else {
+ check.invalidAST(field.Pos(), "... not permitted")
+ // ignore ... and continue
+ }
+ }
+ typ := check.typ(ftype)
+ // The parser ensures that f.Tag is nil and we don't
+ // care if a constructed AST contains a non-nil tag.
+ if len(field.Names) > 0 {
+ // named parameter
+ for _, name := range field.Names {
+ if name.Name == "" {
+ check.invalidAST(name.Pos(), "anonymous parameter")
+ // ok to continue
+ }
+ par := NewParam(name.Pos(), check.pkg, name.Name, typ)
+ check.declare(scope, name, par)
+ params = append(params, par)
+ }
+ named = true
+ } else {
+ // anonymous parameter
+ par := NewParam(ftype.Pos(), check.pkg, "", typ)
+ check.recordImplicit(field, par)
+ params = append(params, par)
+ anonymous = true
+ }
+ }
+
+ if named && anonymous {
+ check.invalidAST(list.Pos(), "list contains both named and anonymous parameters")
+ // ok to continue
+ }
+
+ // For a variadic function, change the last parameter's type from T to []T.
+ if variadic && len(params) > 0 {
+ last := params[len(params)-1]
+ last.typ = &Slice{elem: last.typ}
+ }
+
+ return
+}
+
+func (check *Checker) declareInSet(oset *objset, pos token.Pos, obj Object) bool {
+ if alt := oset.insert(obj); alt != nil {
+ check.errorf(pos, "%s redeclared", obj.Name())
+ check.reportAltDecl(alt)
+ return false
+ }
+ return true
+}
+
+func (check *Checker) interfaceType(iface *Interface, ityp *ast.InterfaceType, def *Named, path []*TypeName) {
+ // empty interface: common case
+ if ityp.Methods == nil {
+ return
+ }
+
+ // The parser ensures that field tags are nil and we don't
+ // care if a constructed AST contains non-nil tags.
+
+ // use named receiver type if available (for better error messages)
+ var recvTyp Type = iface
+ if def != nil {
+ recvTyp = def
+ }
+
+ // Phase 1: Collect explicitly declared methods, the corresponding
+ // signature (AST) expressions, and the list of embedded
+ // type (AST) expressions. Do not resolve signatures or
+ // embedded types yet to avoid cycles referring to this
+ // interface.
+
+ var (
+ mset objset
+ signatures []ast.Expr // list of corresponding method signatures
+ embedded []ast.Expr // list of embedded types
+ )
+ for _, f := range ityp.Methods.List {
+ if len(f.Names) > 0 {
+ // The parser ensures that there's only one method
+ // and we don't care if a constructed AST has more.
+ name := f.Names[0]
+ pos := name.Pos()
+ // spec: "As with all method sets, in an interface type,
+ // each method must have a unique non-blank name."
+ if name.Name == "_" {
+ check.errorf(pos, "invalid method name _")
+ continue
+ }
+ // Don't type-check signature yet - use an
+ // empty signature now and update it later.
+ // Since we know the receiver, set it up now
+ // (required to avoid crash in ptrRecv; see
+ // e.g. test case for issue 6638).
+ // TODO(gri) Consider marking methods signatures
+ // as incomplete, for better error messages. See
+ // also the T4 and T5 tests in testdata/cycles2.src.
+ sig := new(Signature)
+ sig.recv = NewVar(pos, check.pkg, "", recvTyp)
+ m := NewFunc(pos, check.pkg, name.Name, sig)
+ if check.declareInSet(&mset, pos, m) {
+ iface.methods = append(iface.methods, m)
+ iface.allMethods = append(iface.allMethods, m)
+ signatures = append(signatures, f.Type)
+ check.recordDef(name, m)
+ }
+ } else {
+ // embedded type
+ embedded = append(embedded, f.Type)
+ }
+ }
+
+ // Phase 2: Resolve embedded interfaces. Because an interface must not
+ // embed itself (directly or indirectly), each embedded interface
+ // can be fully resolved without depending on any method of this
+ // interface (if there is a cycle or another error, the embedded
+ // type resolves to an invalid type and is ignored).
+ // In particular, the list of methods for each embedded interface
+ // must be complete (it cannot depend on this interface), and so
+ // those methods can be added to the list of all methods of this
+ // interface.
+
+ for _, e := range embedded {
+ pos := e.Pos()
+ typ := check.typExpr(e, nil, path)
+ named, _ := typ.(*Named)
+ if named == nil {
+ if typ != Typ[Invalid] {
+ check.invalidAST(pos, "%s is not named type", typ)
+ }
+ continue
+ }
+ // determine underlying (possibly incomplete) type
+ // by following its forward chain
+ u := underlying(named)
+ embed, _ := u.(*Interface)
+ if embed == nil {
+ if u != Typ[Invalid] {
+ check.errorf(pos, "%s is not an interface", named)
+ }
+ continue
+ }
+ iface.embeddeds = append(iface.embeddeds, named)
+ // collect embedded methods
+ for _, m := range embed.allMethods {
+ if check.declareInSet(&mset, pos, m) {
+ iface.allMethods = append(iface.allMethods, m)
+ }
+ }
+ }
+
+ // Phase 3: At this point all methods have been collected for this interface.
+ // It is now safe to type-check the signatures of all explicitly
+ // declared methods, even if they refer to this interface via a cycle
+ // and embed the methods of this interface in a parameter of interface
+ // type.
+
+ for i, m := range iface.methods {
+ expr := signatures[i]
+ typ := check.typ(expr)
+ sig, _ := typ.(*Signature)
+ if sig == nil {
+ if typ != Typ[Invalid] {
+ check.invalidAST(expr.Pos(), "%s is not a method signature", typ)
+ }
+ continue // keep method with empty method signature
+ }
+ // update signature, but keep recv that was set up before
+ old := m.typ.(*Signature)
+ sig.recv = old.recv
+ *old = *sig // update signature (don't replace it!)
+ }
+
+ // TODO(gri) The list of explicit methods is only sorted for now to
+ // produce the same Interface as NewInterface. We may be able to
+ // claim source order in the future. Revisit.
+ sort.Sort(byUniqueMethodName(iface.methods))
+
+ // TODO(gri) The list of embedded types is only sorted for now to
+ // produce the same Interface as NewInterface. We may be able to
+ // claim source order in the future. Revisit.
+ sort.Sort(byUniqueTypeName(iface.embeddeds))
+
+ sort.Sort(byUniqueMethodName(iface.allMethods))
+}
+
+// byUniqueTypeName named type lists can be sorted by their unique type names.
+type byUniqueTypeName []*Named
+
+func (a byUniqueTypeName) Len() int { return len(a) }
+func (a byUniqueTypeName) Less(i, j int) bool { return a[i].obj.Id() < a[j].obj.Id() }
+func (a byUniqueTypeName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+
+// byUniqueMethodName method lists can be sorted by their unique method names.
+type byUniqueMethodName []*Func
+
+func (a byUniqueMethodName) Len() int { return len(a) }
+func (a byUniqueMethodName) Less(i, j int) bool { return a[i].Id() < a[j].Id() }
+func (a byUniqueMethodName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+
+func (check *Checker) tag(t *ast.BasicLit) string {
+ if t != nil {
+ if t.Kind == token.STRING {
+ if val, err := strconv.Unquote(t.Value); err == nil {
+ return val
+ }
+ }
+ check.invalidAST(t.Pos(), "incorrect tag syntax: %q", t.Value)
+ }
+ return ""
+}
+
+func (check *Checker) structType(styp *Struct, e *ast.StructType, path []*TypeName) {
+ list := e.Fields
+ if list == nil {
+ return
+ }
+
+ // struct fields and tags
+ var fields []*Var
+ var tags []string
+
+ // for double-declaration checks
+ var fset objset
+
+ // current field typ and tag
+ var typ Type
+ var tag string
+ // anonymous != nil indicates an anonymous field.
+ add := func(field *ast.Field, ident *ast.Ident, anonymous *TypeName, pos token.Pos) {
+ if tag != "" && tags == nil {
+ tags = make([]string, len(fields))
+ }
+ if tags != nil {
+ tags = append(tags, tag)
+ }
+
+ name := ident.Name
+ fld := NewField(pos, check.pkg, name, typ, anonymous != nil)
+ // spec: "Within a struct, non-blank field names must be unique."
+ if name == "_" || check.declareInSet(&fset, pos, fld) {
+ fields = append(fields, fld)
+ check.recordDef(ident, fld)
+ }
+ if anonymous != nil {
+ check.recordUse(ident, anonymous)
+ }
+ }
+
+ for _, f := range list.List {
+ typ = check.typExpr(f.Type, nil, path)
+ tag = check.tag(f.Tag)
+ if len(f.Names) > 0 {
+ // named fields
+ for _, name := range f.Names {
+ add(f, name, nil, name.Pos())
+ }
+ } else {
+ // anonymous field
+ name := anonymousFieldIdent(f.Type)
+ pos := f.Type.Pos()
+ t, isPtr := deref(typ)
+ switch t := t.(type) {
+ case *Basic:
+ if t == Typ[Invalid] {
+ // error was reported before
+ continue
+ }
+ // unsafe.Pointer is treated like a regular pointer
+ if t.kind == UnsafePointer {
+ check.errorf(pos, "anonymous field type cannot be unsafe.Pointer")
+ continue
+ }
+ add(f, name, Universe.Lookup(t.name).(*TypeName), pos)
+
+ case *Named:
+ // spec: "An embedded type must be specified as a type name
+ // T or as a pointer to a non-interface type name *T, and T
+ // itself may not be a pointer type."
+ switch u := t.underlying.(type) {
+ case *Basic:
+ // unsafe.Pointer is treated like a regular pointer
+ if u.kind == UnsafePointer {
+ check.errorf(pos, "anonymous field type cannot be unsafe.Pointer")
+ continue
+ }
+ case *Pointer:
+ check.errorf(pos, "anonymous field type cannot be a pointer")
+ continue
+ case *Interface:
+ if isPtr {
+ check.errorf(pos, "anonymous field type cannot be a pointer to an interface")
+ continue
+ }
+ }
+ add(f, name, t.obj, pos)
+
+ default:
+ check.invalidAST(pos, "anonymous field type %s must be named", typ)
+ }
+ }
+ }
+
+ styp.fields = fields
+ styp.tags = tags
+}
+
+func anonymousFieldIdent(e ast.Expr) *ast.Ident {
+ switch e := e.(type) {
+ case *ast.Ident:
+ return e
+ case *ast.StarExpr:
+ return anonymousFieldIdent(e.X)
+ case *ast.SelectorExpr:
+ return e.Sel
+ }
+ return nil // invalid anonymous field
+}
diff --git a/llgo/third_party/go.tools/go/types/universe.go b/llgo/third_party/go.tools/go/types/universe.go
new file mode 100644
index 0000000000000000000000000000000000000000..1ab78b74d568c77144aef68868e7333382196d79
--- /dev/null
+++ b/llgo/third_party/go.tools/go/types/universe.go
@@ -0,0 +1,224 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements the universe and unsafe package scopes.
+
+package types
+
+import (
+ "go/token"
+ "strings"
+
+ "llvm.org/llgo/third_party/go.tools/go/exact"
+)
+
+var (
+ Universe *Scope
+ Unsafe *Package
+ universeIota *Const
+ UniverseByte *Basic // uint8 alias, but has name "byte"
+ UniverseRune *Basic // int32 alias, but has name "rune"
+)
+
+var Typ = [...]*Basic{
+ Invalid: {Invalid, 0, "invalid type"},
+
+ Bool: {Bool, IsBoolean, "bool"},
+ Int: {Int, IsInteger, "int"},
+ Int8: {Int8, IsInteger, "int8"},
+ Int16: {Int16, IsInteger, "int16"},
+ Int32: {Int32, IsInteger, "int32"},
+ Int64: {Int64, IsInteger, "int64"},
+ Uint: {Uint, IsInteger | IsUnsigned, "uint"},
+ Uint8: {Uint8, IsInteger | IsUnsigned, "uint8"},
+ Uint16: {Uint16, IsInteger | IsUnsigned, "uint16"},
+ Uint32: {Uint32, IsInteger | IsUnsigned, "uint32"},
+ Uint64: {Uint64, IsInteger | IsUnsigned, "uint64"},
+ Uintptr: {Uintptr, IsInteger | IsUnsigned, "uintptr"},
+ Float32: {Float32, IsFloat, "float32"},
+ Float64: {Float64, IsFloat, "float64"},
+ Complex64: {Complex64, IsComplex, "complex64"},
+ Complex128: {Complex128, IsComplex, "complex128"},
+ String: {String, IsString, "string"},
+ UnsafePointer: {UnsafePointer, 0, "Pointer"},
+
+ UntypedBool: {UntypedBool, IsBoolean | IsUntyped, "untyped bool"},
+ UntypedInt: {UntypedInt, IsInteger | IsUntyped, "untyped int"},
+ UntypedRune: {UntypedRune, IsInteger | IsUntyped, "untyped rune"},
+ UntypedFloat: {UntypedFloat, IsFloat | IsUntyped, "untyped float"},
+ UntypedComplex: {UntypedComplex, IsComplex | IsUntyped, "untyped complex"},
+ UntypedString: {UntypedString, IsString | IsUntyped, "untyped string"},
+ UntypedNil: {UntypedNil, IsUntyped, "untyped nil"},
+}
+
+var aliases = [...]*Basic{
+ {Byte, IsInteger | IsUnsigned, "byte"},
+ {Rune, IsInteger, "rune"},
+}
+
+func defPredeclaredTypes() {
+ for _, t := range Typ {
+ def(NewTypeName(token.NoPos, nil, t.name, t))
+ }
+ for _, t := range aliases {
+ def(NewTypeName(token.NoPos, nil, t.name, t))
+ }
+
+ // Error has a nil package in its qualified name since it is in no package
+ res := NewVar(token.NoPos, nil, "", Typ[String])
+ sig := &Signature{results: NewTuple(res)}
+ err := NewFunc(token.NoPos, nil, "Error", sig)
+ typ := &Named{underlying: NewInterface([]*Func{err}, nil).Complete()}
+ sig.recv = NewVar(token.NoPos, nil, "", typ)
+ def(NewTypeName(token.NoPos, nil, "error", typ))
+}
+
+var predeclaredConsts = [...]struct {
+ name string
+ kind BasicKind
+ val exact.Value
+}{
+ {"true", UntypedBool, exact.MakeBool(true)},
+ {"false", UntypedBool, exact.MakeBool(false)},
+ {"iota", UntypedInt, exact.MakeInt64(0)},
+}
+
+func defPredeclaredConsts() {
+ for _, c := range predeclaredConsts {
+ def(NewConst(token.NoPos, nil, c.name, Typ[c.kind], c.val))
+ }
+}
+
+func defPredeclaredNil() {
+ def(&Nil{object{name: "nil", typ: Typ[UntypedNil]}})
+}
+
+// A builtinId is the id of a builtin function.
+type builtinId int
+
+const (
+ // universe scope
+ _Append builtinId = iota
+ _Cap
+ _Close
+ _Complex
+ _Copy
+ _Delete
+ _Imag
+ _Len
+ _Make
+ _New
+ _Panic
+ _Print
+ _Println
+ _Real
+ _Recover
+
+ // package unsafe
+ _Alignof
+ _Offsetof
+ _Sizeof
+
+ // testing support
+ _Assert
+ _Trace
+)
+
+var predeclaredFuncs = [...]struct {
+ name string
+ nargs int
+ variadic bool
+ kind exprKind
+}{
+ _Append: {"append", 1, true, expression},
+ _Cap: {"cap", 1, false, expression},
+ _Close: {"close", 1, false, statement},
+ _Complex: {"complex", 2, false, expression},
+ _Copy: {"copy", 2, false, statement},
+ _Delete: {"delete", 2, false, statement},
+ _Imag: {"imag", 1, false, expression},
+ _Len: {"len", 1, false, expression},
+ _Make: {"make", 1, true, expression},
+ _New: {"new", 1, false, expression},
+ _Panic: {"panic", 1, false, statement},
+ _Print: {"print", 0, true, statement},
+ _Println: {"println", 0, true, statement},
+ _Real: {"real", 1, false, expression},
+ _Recover: {"recover", 0, false, statement},
+
+ _Alignof: {"Alignof", 1, false, expression},
+ _Offsetof: {"Offsetof", 1, false, expression},
+ _Sizeof: {"Sizeof", 1, false, expression},
+
+ _Assert: {"assert", 1, false, statement},
+ _Trace: {"trace", 0, true, statement},
+}
+
+func defPredeclaredFuncs() {
+ for i := range predeclaredFuncs {
+ id := builtinId(i)
+ if id == _Assert || id == _Trace {
+ continue // only define these in testing environment
+ }
+ def(newBuiltin(id))
+ }
+}
+
+// DefPredeclaredTestFuncs defines the assert and trace built-ins.
+// These built-ins are intended for debugging and testing of this
+// package only.
+func DefPredeclaredTestFuncs() {
+ if Universe.Lookup("assert") != nil {
+ return // already defined
+ }
+ def(newBuiltin(_Assert))
+ def(newBuiltin(_Trace))
+}
+
+func init() {
+ Universe = NewScope(nil, "universe")
+ Unsafe = NewPackage("unsafe", "unsafe")
+ Unsafe.complete = true
+
+ defPredeclaredTypes()
+ defPredeclaredConsts()
+ defPredeclaredNil()
+ defPredeclaredFuncs()
+
+ universeIota = Universe.Lookup("iota").(*Const)
+ UniverseByte = Universe.Lookup("byte").(*TypeName).typ.(*Basic)
+ UniverseRune = Universe.Lookup("rune").(*TypeName).typ.(*Basic)
+}
+
+// Objects with names containing blanks are internal and not entered into
+// a scope. Objects with exported names are inserted in the unsafe package
+// scope; other objects are inserted in the universe scope.
+//
+func def(obj Object) {
+ name := obj.Name()
+ if strings.Index(name, " ") >= 0 {
+ return // nothing to do
+ }
+ // fix Obj link for named types
+ if typ, ok := obj.Type().(*Named); ok {
+ typ.obj = obj.(*TypeName)
+ }
+ // exported identifiers go into package unsafe
+ scope := Universe
+ if obj.Exported() {
+ scope = Unsafe.scope
+ // set Pkg field
+ switch obj := obj.(type) {
+ case *TypeName:
+ obj.pkg = Unsafe
+ case *Builtin:
+ obj.pkg = Unsafe
+ default:
+ unreachable()
+ }
+ }
+ if scope.Insert(obj) != nil {
+ panic("internal error: double declaration")
+ }
+}
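+
+// Editorial sketch (not part of the upstream file): once init has run, the
+// predeclared identifiers installed by def above are visible through the
+// Universe scope. All names used below are declared earlier in this file.
+func exampleUniverseLookup() {
+ obj := Universe.Lookup("error").(*TypeName)
+ iface := obj.Type().Underlying().(*Interface)
+ _ = iface.NumMethods() // 1: the Error() string method
+ _ = UniverseByte // *Basic named "byte", an alias of uint8
+}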
diff --git a/llgo/third_party/go.tools/go/vcs/discovery.go b/llgo/third_party/go.tools/go/vcs/discovery.go
new file mode 100644
index 0000000000000000000000000000000000000000..d5c3fc6bb296adeb250cc4db497b13cd3e593cd6
--- /dev/null
+++ b/llgo/third_party/go.tools/go/vcs/discovery.go
@@ -0,0 +1,73 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vcs
+
+import (
+ "encoding/xml"
+ "fmt"
+ "io"
+ "strings"
+)
+
+// charsetReader returns a reader for the given charset. Currently
+// it only supports UTF-8 and ASCII. Otherwise, it returns a meaningful
+// error which is printed by go get, so the user can find why the package
+// wasn't downloaded if the encoding is not supported. Note that, in
+// order to reduce potential errors, ASCII is treated as UTF-8 (i.e. characters
+// greater than 0x7f are not rejected).
+func charsetReader(charset string, input io.Reader) (io.Reader, error) {
+ switch strings.ToLower(charset) {
+ case "ascii":
+ return input, nil
+ default:
+ return nil, fmt.Errorf("can't decode XML document using charset %q", charset)
+ }
+}
+
+// parseMetaGoImports returns meta imports from the HTML in r.
+// Parsing ends at the end of the <head> section or the beginning of the <body>.
+func parseMetaGoImports(r io.Reader) (imports []metaImport, err error) {
+ d := xml.NewDecoder(r)
+ d.CharsetReader = charsetReader
+ d.Strict = false
+ var t xml.Token
+ for {
+ t, err = d.Token()
+ if err != nil {
+ return
+ }
+ if e, ok := t.(xml.StartElement); ok && strings.EqualFold(e.Name.Local, "body") {
+ return
+ }
+ if e, ok := t.(xml.EndElement); ok && strings.EqualFold(e.Name.Local, "head") {
+ return
+ }
+ e, ok := t.(xml.StartElement)
+ if !ok || !strings.EqualFold(e.Name.Local, "meta") {
+ continue
+ }
+ if attrValue(e.Attr, "name") != "go-import" {
+ continue
+ }
+ if f := strings.Fields(attrValue(e.Attr, "content")); len(f) == 3 {
+ imports = append(imports, metaImport{
+ Prefix: f[0],
+ VCS: f[1],
+ RepoRoot: f[2],
+ })
+ }
+ }
+}
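+
+// Editorial sketch (not part of the upstream file): a page served for
+// "go get" typically carries a go-import meta tag; parsing the hypothetical
+// document below yields one metaImport value.
+func exampleParseMetaGoImports() ([]metaImport, error) {
+ page := `<html><head>` +
+ `<meta name="go-import" content="example.org/pkg git https://example.org/repo.git">` +
+ `</head><body></body></html>`
+ // Expected: [{Prefix: "example.org/pkg", VCS: "git", RepoRoot: "https://example.org/repo.git"}]
+ return parseMetaGoImports(strings.NewReader(page))
+}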
+
+// attrValue returns the attribute value for the case-insensitive key
+// `name', or the empty string if nothing is found.
+func attrValue(attrs []xml.Attr, name string) string {
+ for _, a := range attrs {
+ if strings.EqualFold(a.Name.Local, name) {
+ return a.Value
+ }
+ }
+ return ""
+}
diff --git a/llgo/third_party/go.tools/go/vcs/env.go b/llgo/third_party/go.tools/go/vcs/env.go
new file mode 100644
index 0000000000000000000000000000000000000000..e846f5b3b86640a335e9490203ae4a1b0fcf4ee6
--- /dev/null
+++ b/llgo/third_party/go.tools/go/vcs/env.go
@@ -0,0 +1,39 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vcs
+
+import (
+ "os"
+ "strings"
+)
+
+// envForDir returns a copy of the environment
+// suitable for running in the given directory.
+// The environment is the current process's environment
+// but with an updated $PWD, so that an os.Getwd in the
+// child will be faster.
+func envForDir(dir string) []string {
+ env := os.Environ()
+ // Internally we only use rooted paths, so dir is rooted.
+ // Even if dir is not rooted, no harm done.
+ return mergeEnvLists([]string{"PWD=" + dir}, env)
+}
+
+// mergeEnvLists merges the two environment lists such that
+// variables with the same name in "in" replace those in "out".
+func mergeEnvLists(in, out []string) []string {
+NextVar:
+ for _, inkv := range in {
+ k := strings.SplitAfterN(inkv, "=", 2)[0]
+ for i, outkv := range out {
+ if strings.HasPrefix(outkv, k) {
+ out[i] = inkv
+ continue NextVar
+ }
+ }
+ out = append(out, inkv)
+ }
+ return out
+}
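+
+// Editorial sketch (not part of the upstream file): with a made-up
+// environment, the PWD entry from "in" replaces the existing one and other
+// variables pass through unchanged.
+func exampleMergeEnvLists() []string {
+ out := []string{"PWD=/home/user", "PATH=/usr/bin"}
+ in := []string{"PWD=/tmp/work"}
+ // Result: ["PWD=/tmp/work", "PATH=/usr/bin"]
+ return mergeEnvLists(in, out)
+}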
diff --git a/llgo/third_party/go.tools/go/vcs/http.go b/llgo/third_party/go.tools/go/vcs/http.go
new file mode 100644
index 0000000000000000000000000000000000000000..96188185cb6290923227b3fa2db65dbc9241f599
--- /dev/null
+++ b/llgo/third_party/go.tools/go/vcs/http.go
@@ -0,0 +1,80 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vcs
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "net/http"
+ "net/url"
+)
+
+// httpClient is the default HTTP client, but a variable so it can be
+// changed by tests, without modifying http.DefaultClient.
+var httpClient = http.DefaultClient
+
+// httpGET returns the data from an HTTP GET request for the given URL.
+func httpGET(url string) ([]byte, error) {
+ resp, err := httpClient.Get(url)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != 200 {
+ return nil, fmt.Errorf("%s: %s", url, resp.Status)
+ }
+ b, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return nil, fmt.Errorf("%s: %v", url, err)
+ }
+ return b, nil
+}
+
+// httpsOrHTTP returns the body of either the importPath's
+// https resource or, if unavailable, the http resource.
+func httpsOrHTTP(importPath string) (urlStr string, body io.ReadCloser, err error) {
+ fetch := func(scheme string) (urlStr string, res *http.Response, err error) {
+ u, err := url.Parse(scheme + "://" + importPath)
+ if err != nil {
+ return "", nil, err
+ }
+ u.RawQuery = "go-get=1"
+ urlStr = u.String()
+ if Verbose {
+ log.Printf("Fetching %s", urlStr)
+ }
+ res, err = httpClient.Get(urlStr)
+ return
+ }
+ closeBody := func(res *http.Response) {
+ if res != nil {
+ res.Body.Close()
+ }
+ }
+ urlStr, res, err := fetch("https")
+ if err != nil || res.StatusCode != 200 {
+ if Verbose {
+ if err != nil {
+ log.Printf("https fetch failed.")
+ } else {
+ log.Printf("ignoring https fetch with status code %d", res.StatusCode)
+ }
+ }
+ closeBody(res)
+ urlStr, res, err = fetch("http")
+ }
+ if err != nil {
+ closeBody(res)
+ return "", nil, err
+ }
+ // Note: accepting a non-200 OK here, so people can serve a
+ // meta import in their http 404 page.
+ if Verbose {
+ log.Printf("Parsing meta tags from %s (status code %d)", urlStr, res.StatusCode)
+ }
+ return urlStr, res.Body, nil
+}
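+
+// Editorial sketch (not part of the upstream file): the helpers above are
+// typically combined — fetch the ?go-get=1 page, preferring https, and feed
+// the body to the meta-tag parser in discovery.go. The host name is made up.
+func exampleDiscover() ([]metaImport, error) {
+ _, body, err := httpsOrHTTP("example.org/pkg")
+ if err != nil {
+ return nil, err
+ }
+ defer body.Close()
+ return parseMetaGoImports(body)
+}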
diff --git a/llgo/third_party/go.tools/go/vcs/vcs.go b/llgo/third_party/go.tools/go/vcs/vcs.go
new file mode 100644
index 0000000000000000000000000000000000000000..586e1b8ac46f08b25d87577f912cd0cd1cecbd5b
--- /dev/null
+++ b/llgo/third_party/go.tools/go/vcs/vcs.go
@@ -0,0 +1,744 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vcs
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "log"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+// Verbose enables verbose operation logging.
+var Verbose bool
+
+// ShowCmd controls whether VCS commands are printed.
+var ShowCmd bool
+
+// A Cmd describes how to use a version control system
+// like Mercurial, Git, or Subversion.
+type Cmd struct {
+ Name string
+ Cmd string // name of binary to invoke command
+
+ CreateCmd string // command to download a fresh copy of a repository
+ DownloadCmd string // command to download updates into an existing repository
+
+ TagCmd []TagCmd // commands to list tags
+ TagLookupCmd []TagCmd // commands to lookup tags before running tagSyncCmd
+ TagSyncCmd string // command to sync to specific tag
+ TagSyncDefault string // command to sync to default tag
+
+ LogCmd string // command to list repository changelogs in an XML format
+
+ Scheme []string
+ PingCmd string
+}
+
+// A TagCmd describes a command to list available tags
+// that can be passed to Cmd.TagSyncCmd.
+type TagCmd struct {
+ Cmd string // command to list tags
+ Pattern string // regexp to extract tags from list
+}
+
+// vcsList lists the known version control systems
+var vcsList = []*Cmd{
+ vcsHg,
+ vcsGit,
+ vcsSvn,
+ vcsBzr,
+}
+
+// ByCmd returns the version control system for the given
+// command name (hg, git, svn, bzr).
+func ByCmd(cmd string) *Cmd {
+ for _, vcs := range vcsList {
+ if vcs.Cmd == cmd {
+ return vcs
+ }
+ }
+ return nil
+}
+
+// vcsHg describes how to use Mercurial.
+var vcsHg = &Cmd{
+ Name: "Mercurial",
+ Cmd: "hg",
+
+ CreateCmd: "clone -U {repo} {dir}",
+ DownloadCmd: "pull",
+
+ // We allow both tag and branch names as 'tags'
+ // for selecting a version. This lets people have
+ // a go.release.r60 branch and a go1 branch
+ // and make changes in both, without constantly
+ // editing .hgtags.
+ TagCmd: []TagCmd{
+ {"tags", `^(\S+)`},
+ {"branches", `^(\S+)`},
+ },
+ TagSyncCmd: "update -r {tag}",
+ TagSyncDefault: "update default",
+
+ LogCmd: "log --encoding=utf-8 --limit={limit} --template={template}",
+
+ Scheme: []string{"https", "http", "ssh"},
+ PingCmd: "identify {scheme}://{repo}",
+}
+
+// vcsGit describes how to use Git.
+var vcsGit = &Cmd{
+ Name: "Git",
+ Cmd: "git",
+
+ CreateCmd: "clone {repo} {dir}",
+ DownloadCmd: "pull --ff-only",
+
+ TagCmd: []TagCmd{
+ // tags/xxx matches a git tag named xxx
+ // origin/xxx matches a git branch named xxx on the default remote repository
+ {"show-ref", `(?:tags|origin)/(\S+)$`},
+ },
+ TagLookupCmd: []TagCmd{
+ {"show-ref tags/{tag} origin/{tag}", `((?:tags|origin)/\S+)$`},
+ },
+ TagSyncCmd: "checkout {tag}",
+ TagSyncDefault: "checkout master",
+
+ Scheme: []string{"git", "https", "http", "git+ssh"},
+ PingCmd: "ls-remote {scheme}://{repo}",
+}
+
+// vcsBzr describes how to use Bazaar.
+var vcsBzr = &Cmd{
+ Name: "Bazaar",
+ Cmd: "bzr",
+
+ CreateCmd: "branch {repo} {dir}",
+
+ // Without --overwrite bzr will not pull tags that changed.
+ // Replace by --overwrite-tags after http://pad.lv/681792 goes in.
+ DownloadCmd: "pull --overwrite",
+
+ TagCmd: []TagCmd{{"tags", `^(\S+)`}},
+ TagSyncCmd: "update -r {tag}",
+ TagSyncDefault: "update -r revno:-1",
+
+ Scheme: []string{"https", "http", "bzr", "bzr+ssh"},
+ PingCmd: "info {scheme}://{repo}",
+}
+
+// vcsSvn describes how to use Subversion.
+var vcsSvn = &Cmd{
+ Name: "Subversion",
+ Cmd: "svn",
+
+ CreateCmd: "checkout {repo} {dir}",
+ DownloadCmd: "update",
+
+ // There is no tag command in subversion.
+ // The branch information is all in the path names.
+
+ LogCmd: "log --xml --limit={limit}",
+
+ Scheme: []string{"https", "http", "svn", "svn+ssh"},
+ PingCmd: "info {scheme}://{repo}",
+}
+
+func (v *Cmd) String() string {
+ return v.Name
+}
+
+// run runs the command line cmd in the given directory.
+// keyval is a list of key, value pairs. run expands
+// instances of {key} in cmd into value, but only after
+// splitting cmd into individual arguments.
+// If an error occurs, run prints the command line and the
+// command's combined stdout+stderr to standard error.
+// Otherwise run discards the command's output.
+func (v *Cmd) run(dir string, cmd string, keyval ...string) error {
+ _, err := v.run1(dir, cmd, keyval, true)
+ return err
+}
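+
+// Editorial sketch (not part of the upstream file): the keyval pairs are
+// substituted into {repo} and {dir} after the command line is split, so a
+// Mercurial clone of a hypothetical repository looks like this and executes
+// "hg clone -U https://example.org/repo /tmp/dst".
+func exampleRun() error {
+ return vcsHg.run(".", vcsHg.CreateCmd, "repo", "https://example.org/repo", "dir", "/tmp/dst")
+}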
+
+// runVerboseOnly is like run but only generates error output to standard error in verbose mode.
+func (v *Cmd) runVerboseOnly(dir string, cmd string, keyval ...string) error {
+ _, err := v.run1(dir, cmd, keyval, false)
+ return err
+}
+
+// runOutput is like run but returns the output of the command.
+func (v *Cmd) runOutput(dir string, cmd string, keyval ...string) ([]byte, error) {
+ return v.run1(dir, cmd, keyval, true)
+}
+
+// run1 is the generalized implementation of run and runOutput.
+func (v *Cmd) run1(dir string, cmdline string, keyval []string, verbose bool) ([]byte, error) {
+ m := make(map[string]string)
+ for i := 0; i < len(keyval); i += 2 {
+ m[keyval[i]] = keyval[i+1]
+ }
+ args := strings.Fields(cmdline)
+ for i, arg := range args {
+ args[i] = expand(m, arg)
+ }
+
+ _, err := exec.LookPath(v.Cmd)
+ if err != nil {
+ fmt.Fprintf(os.Stderr,
+ "go: missing %s command. See http://golang.org/s/gogetcmd\n",
+ v.Name)
+ return nil, err
+ }
+
+ cmd := exec.Command(v.Cmd, args...)
+ cmd.Dir = dir
+ cmd.Env = envForDir(cmd.Dir)
+ if ShowCmd {
+ fmt.Printf("cd %s\n", dir)
+ fmt.Printf("%s %s\n", v.Cmd, strings.Join(args, " "))
+ }
+ var buf bytes.Buffer
+ cmd.Stdout = &buf
+ cmd.Stderr = &buf
+ err = cmd.Run()
+ out := buf.Bytes()
+ if err != nil {
+ if verbose || Verbose {
+ fmt.Fprintf(os.Stderr, "# cd %s; %s %s\n", dir, v.Cmd, strings.Join(args, " "))
+ os.Stderr.Write(out)
+ }
+ return nil, err
+ }
+ return out, nil
+}
+
+// Ping pings the repo to determine if scheme used is valid.
+// This repo must be pingable with this scheme and VCS.
+func (v *Cmd) Ping(scheme, repo string) error {
+ return v.runVerboseOnly(".", v.PingCmd, "scheme", scheme, "repo", repo)
+}
+
+// Create creates a new copy of repo in dir.
+// The parent of dir must exist; dir must not.
+func (v *Cmd) Create(dir, repo string) error {
+ return v.run(".", v.CreateCmd, "dir", dir, "repo", repo)
+}
+
+// CreateAtRev creates a new copy of repo in dir at revision rev.
+// The parent of dir must exist; dir must not.
+// rev must be a valid revision in repo.
+func (v *Cmd) CreateAtRev(dir, repo, rev string) error {
+ // Append revision flag to CreateCmd
+ createAtRevCmd := v.CreateCmd + " --rev=" + rev
+ return v.run(".", createAtRevCmd, "dir", dir, "repo", repo)
+}
+
+// Download downloads any new changes for the repo in dir.
+// dir must be a valid VCS repo compatible with v.
+func (v *Cmd) Download(dir string) error {
+ return v.run(dir, v.DownloadCmd)
+}
+
+// Tags returns the list of available tags for the repo in dir.
+// dir must be a valid VCS repo compatible with v.
+func (v *Cmd) Tags(dir string) ([]string, error) {
+ var tags []string
+ for _, tc := range v.TagCmd {
+ out, err := v.runOutput(dir, tc.Cmd)
+ if err != nil {
+ return nil, err
+ }
+ re := regexp.MustCompile(`(?m-s)` + tc.Pattern)
+ for _, m := range re.FindAllStringSubmatch(string(out), -1) {
+ tags = append(tags, m[1])
+ }
+ }
+ return tags, nil
+}
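+
+// Editorial sketch (not part of the upstream file): for a Git checkout this
+// runs "git show-ref" in dir (a made-up path here) and keeps the submatch of
+// each TagCmd pattern, e.g. "v1.0" out of "refs/tags/v1.0".
+func exampleTags() ([]string, error) {
+ return vcsGit.Tags("/path/to/checkout")
+}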
+
+// TagSync syncs the repo in dir to the named tag,
+// which either is a tag returned by Tags or is v.TagSyncDefault.
+// dir must be a valid VCS repo compatible with v and the tag must exist.
+func (v *Cmd) TagSync(dir, tag string) error {
+ if v.TagSyncCmd == "" {
+ return nil
+ }
+ if tag != "" {
+ for _, tc := range v.TagLookupCmd {
+ out, err := v.runOutput(dir, tc.Cmd, "tag", tag)
+ if err != nil {
+ return err
+ }
+ re := regexp.MustCompile(`(?m-s)` + tc.Pattern)
+ m := re.FindStringSubmatch(string(out))
+ if len(m) > 1 {
+ tag = m[1]
+ break
+ }
+ }
+ }
+ if tag == "" && v.TagSyncDefault != "" {
+ return v.run(dir, v.TagSyncDefault)
+ }
+ return v.run(dir, v.TagSyncCmd, "tag", tag)
+}
+
+// Log logs the changes for the repo in dir.
+// dir must be a valid VCS repo compatible with v.
+func (v *Cmd) Log(dir, logTemplate string) ([]byte, error) {
+ if err := v.Download(dir); err != nil {
+ return []byte{}, err
+ }
+
+ const N = 50 // how many revisions to grab
+ return v.runOutput(dir, v.LogCmd, "limit", strconv.Itoa(N), "template", logTemplate)
+}
+
+// LogAtRev logs the change for repo in dir at the rev revision.
+// dir must be a valid VCS repo compatible with v.
+// rev must be a valid revision for the repo in dir.
+func (v *Cmd) LogAtRev(dir, rev, logTemplate string) ([]byte, error) {
+ if err := v.Download(dir); err != nil {
+ return []byte{}, err
+ }
+
+ // Append revision flag to LogCmd.
+ logAtRevCmd := v.LogCmd + " --rev=" + rev
+ return v.runOutput(dir, logAtRevCmd, "limit", strconv.Itoa(1), "template", logTemplate)
+}
+
+// A vcsPath describes how to convert an import path into a
+// version control system and repository name.
+type vcsPath struct {
+ prefix string // prefix this description applies to
+ re string // pattern for import path
+ repo string // repository to use (expand with match of re)
+ vcs string // version control system to use (expand with match of re)
+ check func(match map[string]string) error // additional checks
+ ping bool // ping for scheme to use to download repo
+
+ regexp *regexp.Regexp // cached compiled form of re
+}
+
+// FromDir inspects dir and its parents to determine the
+// version control system and code repository to use.
+// On return, root is the import path
+// corresponding to the root of the repository
+// (thus root is a prefix of importPath).
+func FromDir(dir, srcRoot string) (vcs *Cmd, root string, err error) {
+ // Clean and double-check that dir is in (a subdirectory of) srcRoot.
+ dir = filepath.Clean(dir)
+ srcRoot = filepath.Clean(srcRoot)
+ if len(dir) <= len(srcRoot) || dir[len(srcRoot)] != filepath.Separator {
+ return nil, "", fmt.Errorf("directory %q is outside source root %q", dir, srcRoot)
+ }
+
+ for len(dir) > len(srcRoot) {
+ for _, vcs := range vcsList {
+ if fi, err := os.Stat(filepath.Join(dir, "."+vcs.Cmd)); err == nil && fi.IsDir() {
+ return vcs, dir[len(srcRoot)+1:], nil
+ }
+ }
+
+ // Move to parent.
+ ndir := filepath.Dir(dir)
+ if len(ndir) >= len(dir) {
+ // Shouldn't happen, but just in case, stop.
+ break
+ }
+ dir = ndir
+ }
+
+ return nil, "", fmt.Errorf("directory %q is not using a known version control system", dir)
+}
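+
+// Editorial sketch (not part of the upstream file): given a hypothetical
+// workspace in which /home/user/go/src/example.org/repo/.git exists, FromDir
+// walks up from the subdirectory and reports vcsGit with root "example.org/repo".
+func exampleFromDir() (*Cmd, string, error) {
+ return FromDir("/home/user/go/src/example.org/repo/subpkg", "/home/user/go/src")
+}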
+
+// RepoRoot represents a version control system, a repo, and a root of
+// where to put it on disk.
+type RepoRoot struct {
+ VCS *Cmd
+
+ // repo is the repository URL, including scheme
+ Repo string
+
+ // root is the import path corresponding to the root of the
+ // repository
+ Root string
+}
+
+// RepoRootForImportPath analyzes importPath to determine the
+// version control system and code repository to use.
+func RepoRootForImportPath(importPath string, verbose bool) (*RepoRoot, error) {
+ rr, err := RepoRootForImportPathStatic(importPath, "")
+ if err == errUnknownSite {
+ rr, err = RepoRootForImportDynamic(importPath, verbose)
+
+ // RepoRootForImportDynamic returns error detail
+ // that is irrelevant if the user didn't intend to use a
+ // dynamic import in the first place.
+ // Squelch it.
+ if err != nil {
+ if Verbose {
+ log.Printf("import %q: %v", importPath, err)
+ }
+ err = fmt.Errorf("unrecognized import path %q", importPath)
+ }
+ }
+
+ if err == nil && strings.Contains(importPath, "...") && strings.Contains(rr.Root, "...") {
+ // Do not allow wildcards in the repo root.
+ rr = nil
+ err = fmt.Errorf("cannot expand ... in %q", importPath)
+ }
+ return rr, err
+}
+
+var errUnknownSite = errors.New("dynamic lookup required to find mapping")
+
+// RepoRootForImportPathStatic attempts to map importPath to a
+// RepoRoot using the commonly-used VCS hosting sites in vcsPaths
+// (github.com/user/dir), or from a fully-qualified importPath already
+// containing its VCS type (foo.com/repo.git/dir)
+//
+// If scheme is non-empty, that scheme is forced.
+func RepoRootForImportPathStatic(importPath, scheme string) (*RepoRoot, error) {
+ if strings.Contains(importPath, "://") {
+ return nil, fmt.Errorf("invalid import path %q", importPath)
+ }
+ for _, srv := range vcsPaths {
+ if !strings.HasPrefix(importPath, srv.prefix) {
+ continue
+ }
+ m := srv.regexp.FindStringSubmatch(importPath)
+ if m == nil {
+ if srv.prefix != "" {
+ return nil, fmt.Errorf("invalid %s import path %q", srv.prefix, importPath)
+ }
+ continue
+ }
+
+ // Build map of named subexpression matches for expand.
+ match := map[string]string{
+ "prefix": srv.prefix,
+ "import": importPath,
+ }
+ for i, name := range srv.regexp.SubexpNames() {
+ if name != "" && match[name] == "" {
+ match[name] = m[i]
+ }
+ }
+ if srv.vcs != "" {
+ match["vcs"] = expand(match, srv.vcs)
+ }
+ if srv.repo != "" {
+ match["repo"] = expand(match, srv.repo)
+ }
+ if srv.check != nil {
+ if err := srv.check(match); err != nil {
+ return nil, err
+ }
+ }
+ vcs := ByCmd(match["vcs"])
+ if vcs == nil {
+ return nil, fmt.Errorf("unknown version control system %q", match["vcs"])
+ }
+ if srv.ping {
+ if scheme != "" {
+ match["repo"] = scheme + "://" + match["repo"]
+ } else {
+ for _, scheme := range vcs.Scheme {
+ if vcs.Ping(scheme, match["repo"]) == nil {
+ match["repo"] = scheme + "://" + match["repo"]
+ break
+ }
+ }
+ }
+ }
+ rr := &RepoRoot{
+ VCS: vcs,
+ Repo: match["repo"],
+ Root: match["root"],
+ }
+ return rr, nil
+ }
+ return nil, errUnknownSite
+}
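+
+// Editorial sketch (not part of the upstream file): a GitHub import path is
+// resolved statically via the vcsPaths table below, without any network access.
+func exampleStatic() (*RepoRoot, error) {
+ // Expected: VCS git, Repo "https://github.com/golang/groupcache",
+ // Root "github.com/golang/groupcache".
+ return RepoRootForImportPathStatic("github.com/golang/groupcache/lru", "")
+}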
+
+// RepoRootForImportDynamic finds a *RepoRoot for a custom domain that's not
+// statically known by RepoRootForImportPathStatic.
+//
+// This handles "vanity import paths" like "name.tld/pkg/foo".
+func RepoRootForImportDynamic(importPath string, verbose bool) (*RepoRoot, error) {
+ slash := strings.Index(importPath, "/")
+ if slash < 0 {
+ return nil, errors.New("import path doesn't contain a slash")
+ }
+ host := importPath[:slash]
+ if !strings.Contains(host, ".") {
+ return nil, errors.New("import path doesn't contain a hostname")
+ }
+ urlStr, body, err := httpsOrHTTP(importPath)
+ if err != nil {
+ return nil, fmt.Errorf("http/https fetch: %v", err)
+ }
+ defer body.Close()
+ imports, err := parseMetaGoImports(body)
+ if err != nil {
+ return nil, fmt.Errorf("parsing %s: %v", importPath, err)
+ }
+ metaImport, err := matchGoImport(imports, importPath)
+ if err != nil {
+ if err != errNoMatch {
+ return nil, fmt.Errorf("parse %s: %v", urlStr, err)
+ }
+ return nil, fmt.Errorf("parse %s: no go-import meta tags", urlStr)
+ }
+ if verbose {
+ log.Printf("get %q: found meta tag %#v at %s", importPath, metaImport, urlStr)
+ }
+ // If the import was "uni.edu/bob/project", which said the
+ // prefix was "uni.edu" and the RepoRoot was "evilroot.com",
+ // make sure we don't trust Bob and check out evilroot.com to
+ // "uni.edu" yet (possibly overwriting/preempting another
+ // non-evil student). Instead, first verify the root and see
+ // if it matches Bob's claim.
+ if metaImport.Prefix != importPath {
+ if verbose {
+ log.Printf("get %q: verifying non-authoritative meta tag", importPath)
+ }
+ urlStr0 := urlStr
+ urlStr, body, err = httpsOrHTTP(metaImport.Prefix)
+ if err != nil {
+ return nil, fmt.Errorf("fetch %s: %v", urlStr, err)
+ }
+ imports, err := parseMetaGoImports(body)
+ if err != nil {
+ return nil, fmt.Errorf("parsing %s: %v", importPath, err)
+ }
+ if len(imports) == 0 {
+ return nil, fmt.Errorf("fetch %s: no go-import meta tag", urlStr)
+ }
+ metaImport2, err := matchGoImport(imports, importPath)
+ if err != nil || metaImport != metaImport2 {
+ return nil, fmt.Errorf("%s and %s disagree about go-import for %s", urlStr0, urlStr, metaImport.Prefix)
+ }
+ }
+
+ if !strings.Contains(metaImport.RepoRoot, "://") {
+ return nil, fmt.Errorf("%s: invalid repo root %q; no scheme", urlStr, metaImport.RepoRoot)
+ }
+ rr := &RepoRoot{
+ VCS: ByCmd(metaImport.VCS),
+ Repo: metaImport.RepoRoot,
+ Root: metaImport.Prefix,
+ }
+ if rr.VCS == nil {
+ return nil, fmt.Errorf("%s: unknown vcs %q", urlStr, metaImport.VCS)
+ }
+ return rr, nil
+}
+
+// metaImport represents the parsed tags from HTML files.
+type metaImport struct {
+ Prefix, VCS, RepoRoot string
+}
+
+// errNoMatch is returned from matchGoImport when there's no applicable match.
+var errNoMatch = errors.New("no import match")
+
+// matchGoImport returns the metaImport from imports matching importPath.
+// An error is returned if there are multiple matches.
+// errNoMatch is returned if none match.
+func matchGoImport(imports []metaImport, importPath string) (_ metaImport, err error) {
+ match := -1
+ for i, im := range imports {
+ if !strings.HasPrefix(importPath, im.Prefix) {
+ continue
+ }
+ if match != -1 {
+ err = fmt.Errorf("multiple meta tags match import path %q", importPath)
+ return
+ }
+ match = i
+ }
+ if match == -1 {
+ err = errNoMatch
+ return
+ }
+ return imports[match], nil
+}
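+
+// Editorial sketch (not part of the upstream file): when more than one meta
+// tag prefix matches the import path, matchGoImport refuses to choose.
+func exampleMatchGoImport() (metaImport, error) {
+ imports := []metaImport{
+ {Prefix: "example.org/x", VCS: "git", RepoRoot: "https://example.org/x.git"},
+ {Prefix: "example.org", VCS: "hg", RepoRoot: "https://example.org/all"},
+ }
+ // Both prefixes match, so the result is an error about multiple meta tags.
+ return matchGoImport(imports, "example.org/x/sub")
+}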
+
+// expand rewrites s to replace {k} with match[k] for each key k in match.
+func expand(match map[string]string, s string) string {
+ for k, v := range match {
+ s = strings.Replace(s, "{"+k+"}", v, -1)
+ }
+ return s
+}
+
+// vcsPaths lists the known vcs paths.
+var vcsPaths = []*vcsPath{
+ // Google Code - new syntax
+ {
+ prefix: "code.google.com/",
+ re: `^(?P<root>code\.google\.com/[pr]/(?P<project>[a-z0-9\-]+)(\.(?P<subrepo>[a-z0-9\-]+))?)(/[A-Za-z0-9_.\-]+)*$`,
+ repo: "https://{root}",
+ check: googleCodeVCS,
+ },
+
+ // Google Code - old syntax
+ {
+ re: `^(?P<project>[a-z0-9_\-.]+)\.googlecode\.com/(git|hg|svn)(?P<path>/.*)?$`,
+ check: oldGoogleCode,
+ },
+
+ // Github
+ {
+ prefix: "github.com/",
+ re: `^(?P<root>github\.com/[A-Za-z0-9_.\-]+/[A-Za-z0-9_.\-]+)(/[A-Za-z0-9_.\-]+)*$`,
+ vcs: "git",
+ repo: "https://{root}",
+ check: noVCSSuffix,
+ },
+
+ // Bitbucket
+ {
+ prefix: "bitbucket.org/",
+ re: `^(?P<root>bitbucket\.org/(?P<bitname>[A-Za-z0-9_.\-]+/[A-Za-z0-9_.\-]+))(/[A-Za-z0-9_.\-]+)*$`,
+ repo: "https://{root}",
+ check: bitbucketVCS,
+ },
+
+ // Launchpad
+ {
+ prefix: "launchpad.net/",
+ re: `^(?P<root>launchpad\.net/((?P<project>[A-Za-z0-9_.\-]+)(?P<series>/[A-Za-z0-9_.\-]+)?|~[A-Za-z0-9_.\-]+/(\+junk|[A-Za-z0-9_.\-]+)/[A-Za-z0-9_.\-]+))(/[A-Za-z0-9_.\-]+)*$`,
+ vcs: "bzr",
+ repo: "https://{root}",
+ check: launchpadVCS,
+ },
+
+ // General syntax for any server.
+ {
+ re: `^(?P<root>(?P<repo>([a-z0-9.\-]+\.)+[a-z0-9.\-]+(:[0-9]+)?/[A-Za-z0-9_.\-/]*?)\.(?P<vcs>bzr|git|hg|svn))(/[A-Za-z0-9_.\-]+)*$`,
+ ping: true,
+ },
+}
+
+func init() {
+ // fill in cached regexps.
+ // Doing this eagerly discovers invalid regexp syntax
+ // without having to run a command that needs that regexp.
+ for _, srv := range vcsPaths {
+ srv.regexp = regexp.MustCompile(srv.re)
+ }
+}
+
+// noVCSSuffix checks that the repository name does not
+// end in .foo for any version control system foo.
+// The usual culprit is ".git".
+func noVCSSuffix(match map[string]string) error {
+ repo := match["repo"]
+ for _, vcs := range vcsList {
+ if strings.HasSuffix(repo, "."+vcs.Cmd) {
+ return fmt.Errorf("invalid version control suffix in %s path", match["prefix"])
+ }
+ }
+ return nil
+}
+
+var googleCheckout = regexp.MustCompile(`id="checkoutcmd">(hg|git|svn)`)
+
+// googleCodeVCS determines the version control system for
+// a code.google.com repository, by scraping the project's
+// /source/checkout page.
+func googleCodeVCS(match map[string]string) error {
+ if err := noVCSSuffix(match); err != nil {
+ return err
+ }
+ data, err := httpGET(expand(match, "https://code.google.com/p/{project}/source/checkout?repo={subrepo}"))
+ if err != nil {
+ return err
+ }
+
+ if m := googleCheckout.FindSubmatch(data); m != nil {
+ if vcs := ByCmd(string(m[1])); vcs != nil {
+ // Subversion requires the old URLs.
+ // TODO: Test.
+ if vcs == vcsSvn {
+ if match["subrepo"] != "" {
+ return fmt.Errorf("sub-repositories not supported in Google Code Subversion projects")
+ }
+ match["repo"] = expand(match, "https://{project}.googlecode.com/svn")
+ }
+ match["vcs"] = vcs.Cmd
+ return nil
+ }
+ }
+
+ return fmt.Errorf("unable to detect version control system for code.google.com/ path")
+}
+
+// oldGoogleCode is invoked for old-style foo.googlecode.com paths.
+// It prints an error giving the equivalent new path.
+func oldGoogleCode(match map[string]string) error {
+ return fmt.Errorf("invalid Google Code import path: use %s instead",
+ expand(match, "code.google.com/p/{project}{path}"))
+}
+
+// bitbucketVCS determines the version control system for a
+// Bitbucket repository, by using the Bitbucket API.
+func bitbucketVCS(match map[string]string) error {
+ if err := noVCSSuffix(match); err != nil {
+ return err
+ }
+
+ var resp struct {
+ SCM string `json:"scm"`
+ }
+ url := expand(match, "https://api.bitbucket.org/1.0/repositories/{bitname}")
+ data, err := httpGET(url)
+ if err != nil {
+ return err
+ }
+ if err := json.Unmarshal(data, &resp); err != nil {
+ return fmt.Errorf("decoding %s: %v", url, err)
+ }
+
+ if ByCmd(resp.SCM) != nil {
+ match["vcs"] = resp.SCM
+ if resp.SCM == "git" {
+ match["repo"] += ".git"
+ }
+ return nil
+ }
+
+ return fmt.Errorf("unable to detect version control system for bitbucket.org/ path")
+}
+
+// launchpadVCS solves the ambiguity for "lp.net/project/foo". In this case,
+// "foo" could be a series name registered in Launchpad with its own branch,
+// and it could also be the name of a directory within the main project
+// branch one level up.
+func launchpadVCS(match map[string]string) error {
+ if match["project"] == "" || match["series"] == "" {
+ return nil
+ }
+ _, err := httpGET(expand(match, "https://code.launchpad.net/{project}{series}/.bzr/branch-format"))
+ if err != nil {
+ match["root"] = expand(match, "launchpad.net/{project}")
+ match["repo"] = expand(match, "https://{root}")
+ }
+ return nil
+}
diff --git a/llgo/third_party/go.tools/go/vcs/vcs_test.go b/llgo/third_party/go.tools/go/vcs/vcs_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..0b8dd8c84f5f13f9d76039071092512d35d9a707
--- /dev/null
+++ b/llgo/third_party/go.tools/go/vcs/vcs_test.go
@@ -0,0 +1,86 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vcs
+
+import (
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "testing"
+)
+
+// Test that RepoRootForImportPath creates the correct RepoRoot for a given importPath.
+// TODO(cmang): Add tests for SVN and BZR.
+func TestRepoRootForImportPath(t *testing.T) {
+ tests := []struct {
+ path string
+ want *RepoRoot
+ }{
+ {
+ "code.google.com/p/go",
+ &RepoRoot{
+ VCS: vcsHg,
+ Repo: "https://code.google.com/p/go",
+ },
+ },
+ {
+ "code.google.com/r/go",
+ &RepoRoot{
+ VCS: vcsHg,
+ Repo: "https://code.google.com/r/go",
+ },
+ },
+ {
+ "github.com/golang/groupcache",
+ &RepoRoot{
+ VCS: vcsGit,
+ Repo: "https://github.com/golang/groupcache",
+ },
+ },
+ }
+
+ for _, test := range tests {
+ got, err := RepoRootForImportPath(test.path, false)
+ if err != nil {
+ t.Errorf("RepoRootForImport(%q): %v", test.path, err)
+ continue
+ }
+ want := test.want
+ if got.VCS.Name != want.VCS.Name || got.Repo != want.Repo {
+ t.Errorf("RepoRootForImport(%q) = VCS(%s) Repo(%s), want VCS(%s) Repo(%s)", test.path, got.VCS, got.Repo, want.VCS, want.Repo)
+ }
+ }
+}
+
+// Test that FromDir correctly inspects a given directory and returns the right VCS.
+func TestFromDir(t *testing.T) {
+ type testStruct struct {
+ path string
+ want *Cmd
+ }
+
+ tests := make([]testStruct, len(vcsList))
+ tempDir, err := ioutil.TempDir("", "vcstest")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tempDir)
+
+ for i, vcs := range vcsList {
+ tests[i] = testStruct{
+ filepath.Join(tempDir, vcs.Name, "."+vcs.Cmd),
+ vcs,
+ }
+ }
+
+ for _, test := range tests {
+ os.MkdirAll(test.path, 0755)
+ got, _, _ := FromDir(test.path, tempDir)
+ if got.Name != test.want.Name {
+ t.Errorf("FromDir(%q, %q) = %s, want %s", test.path, tempDir, got, test.want)
+ }
+ os.RemoveAll(test.path)
+ }
+}
diff --git a/llgo/third_party/go.tools/godoc/analysis/README b/llgo/third_party/go.tools/godoc/analysis/README
new file mode 100644
index 0000000000000000000000000000000000000000..411af1cbaf18ec9372d548666adc032d556defc1
--- /dev/null
+++ b/llgo/third_party/go.tools/godoc/analysis/README
@@ -0,0 +1,111 @@
+
+Type and Pointer Analysis to-do list
+====================================
+
+Alan Donovan
+
+
+Overall design
+--------------
+
+We should re-run the type and pointer analyses periodically,
+as we do with the indexer.
+
+Version skew: how to mitigate the bad effects of stale URLs in old pages?
+We could record the file's length/CRC32/mtime in the go/loader, and
+refuse to decorate it with links unless they match at serving time.
+
+Use the VFS mechanism when (a) enumerating packages and (b) loading
+them. (Requires planned changes to go/loader.)
+
+Future work: shard this using map/reduce for larger corpora.
+
+Testing: how does one test that a web page "looks right"?
+
+
+Bugs
+----
+
+(*ssa.Program).Create requires transitively error-free packages. We
+can make this more robust by making the requirement transitively free
+of "hard" errors; soft errors are fine.
+
+Markup of compiler errors is slightly buggy because they overlap with
+other selections (e.g. Idents). Fix.
+
+
+User Interface
+--------------
+
+CALLGRAPH:
+- Add a search box: given a search node, expand path from each entry
+ point to it.
+- Cause hovering over a given node to highlight that node, and all
+ nodes that are logically identical to it.
+- Initially expand the callgraph trees (but not their toggle divs).
+
+CALLEES:
+- The '(' links are not very discoverable. Highlight them?
+
+Type info:
+- In the source viewer's lower pane, use a toggle div around the
+ IMPLEMENTS and METHODSETS lists, like we do in the package view.
+ Only expand them initially if short.
+- Include IMPLEMENTS and METHOD SETS information in search index.
+- URLs in IMPLEMENTS/METHOD SETS always link to source, even from the
+ package docs view. This makes sense for links to non-exported
+ types, but links to exported types and funcs should probably go to
+ other package docs.
+- Suppress toggle divs for empty method sets.
+
+Misc:
+- The [X] button in the lower pane is subject to scrolling.
+- Should the lower pane be floating? An iframe?
+ When we change document.location by clicking on a link, it will go away.
+ How do we prevent that (a la Gmail's chat windows)?
+- Progress/status: for each file, display its analysis status, one of:
+ - not in analysis scope
+ - type analysis running...
+ - type analysis complete
+ (+ optionally: there were type errors in this file)
+ And if PTA requested:
+ - type analysis complete; PTA not attempted due to type errors
+ - PTA running...
+ - PTA complete
+- Scroll the selection into view, e.g. the vertical center, or better
+ still, under the pointer (assuming we have a mouse).
+
+
+More features
+-------------
+
+Display the REFERRERS relation? (Useful but potentially large.)
+
+Display the INSTANTIATIONS relation? i.e. given a type T, show the set of
+syntactic constructs that can instantiate it:
+ var x T
+ x := T{...}
+ x = new(T)
+ x = make([]T, n)
+ etc
+ + all INSTANTIATIONS of all S defined as struct{t T} or [n]T
+(Potentially a lot of information.)
+(Add this to oracle too.)
+
+
+Optimisations
+-------------
+
+Each call to addLink takes a (per-file) lock. The locking is
+fine-grained so server latency isn't terrible, but overall it makes
+the link computation quite slow. Batch update might be better.
+
+Memory usage is now about 1.5GB for GOROOT + go.tools. It used to be 700MB.
+
+Optimize for time and space. The main slowdown is the network I/O
+time caused by an increase in page size of about 3x: about 2x from
+HTML, and 0.7--2.1x from JSON (unindented vs indented). The JSON
+contains a lot of filenames (e.g. 820 copies of 16 distinct
+filenames). 20% of the HTML is L%d spans (now disabled). The HTML
+also contains lots of tooltips for long struct/interface types.
+De-dup or just abbreviate? The actual formatting is very fast.
diff --git a/llgo/third_party/go.tools/godoc/analysis/analysis.go b/llgo/third_party/go.tools/godoc/analysis/analysis.go
new file mode 100644
index 0000000000000000000000000000000000000000..c657250c45fb5a84ef4f7aa2c6407c8a685a6d26
--- /dev/null
+++ b/llgo/third_party/go.tools/godoc/analysis/analysis.go
@@ -0,0 +1,617 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package analysis performs type and pointer analysis
+// and generates mark-up for the Go source view.
+//
+// The Run method populates a Result object by running type and
+// (optionally) pointer analysis. The Result object is thread-safe
+// and at all times may be accessed by a serving thread, even as it is
+// progressively populated as analysis facts are derived.
+//
+// The Result is a mapping from each godoc file URL
+// (e.g. /src/fmt/print.go) to information about that file. The
+// information is a list of HTML markup links and a JSON array of
+// structured data values. Some of the links call client-side
+// JavaScript functions that index this array.
+//
+// The analysis computes mark-up for the following relations:
+//
+// IMPORTS: for each ast.ImportSpec, the package that it denotes.
+//
+// RESOLUTION: for each ast.Ident, its kind and type, and the location
+// of its definition.
+//
+// METHOD SETS, IMPLEMENTS: for each ast.Ident defining a named type,
+// its method-set, the set of interfaces it implements or is
+// implemented by, and its size/align values.
+//
+// CALLERS, CALLEES: for each function declaration ('func' token), its
+// callers, and for each call-site ('(' token), its callees.
+//
+// CALLGRAPH: the package docs include an interactive viewer for the
+// intra-package call graph of "fmt".
+//
+// CHANNEL PEERS: for each channel operation make/<-/close, the set of
+// other channel ops that alias the same channel(s).
+//
+// ERRORS: for each locus of a frontend (scanner/parser/type) error, the
+// location is highlighted in red and hover text provides the compiler
+// error message.
+//
+package analysis
+
+import (
+ "fmt"
+ "go/build"
+ "go/scanner"
+ "go/token"
+ "html"
+ "io"
+ "log"
+ "os"
+ "path/filepath"
+ "runtime"
+ "sort"
+ "strings"
+ "sync"
+
+ "llvm.org/llgo/third_party/go.tools/go/loader"
+ "llvm.org/llgo/third_party/go.tools/go/pointer"
+ "llvm.org/llgo/third_party/go.tools/go/ssa"
+ "llvm.org/llgo/third_party/go.tools/go/ssa/ssautil"
+ "llvm.org/llgo/third_party/go.tools/go/types"
+)
+
+// -- links ------------------------------------------------------------
+
+// A Link is an HTML decoration of the bytes [Start, End) of a file.
+// Write is called before/after those bytes to emit the mark-up.
+type Link interface {
+ Start() int
+ End() int
+ Write(w io.Writer, _ int, start bool) // the godoc.LinkWriter signature
+}
+
+// An <a> element.
+type aLink struct {
+ start, end int // =godoc.Segment
+ title string // hover text
+ onclick string // JS code (NB: trusted)
+ href string // URL (NB: trusted)
+}
+
+func (a aLink) Start() int { return a.start }
+func (a aLink) End() int { return a.end }
+func (a aLink) Write(w io.Writer, _ int, start bool) {
+ if start {
+ fmt.Fprintf(w, `<a title='%s'`, html.EscapeString(a.title))
+ if a.onclick != "" {
+ fmt.Fprintf(w, ` onclick='%s'`, html.EscapeString(a.onclick))
+ }
+ if a.href != "" {
+ fmt.Fprintf(w, ` href='%s'`, html.EscapeString(a.href))
+ }
+ fmt.Fprintf(w, ">")
+ } else {
+ fmt.Fprintf(w, "</a>")
+ }
+}
+
+// An <a class='error'> element.
+type errorLink struct {
+ start int
+ msg string
+}
+
+func (e errorLink) Start() int { return e.start }
+func (e errorLink) End() int { return e.start + 1 }
+
+func (e errorLink) Write(w io.Writer, _ int, start bool) {
+ // <span> causes havoc, not sure why, so use <a>.
+ if start {
+ fmt.Fprintf(w, `<a class='error' title='%s'>`, html.EscapeString(e.msg))
+ } else {
+ fmt.Fprintf(w, "</a>")
+ }
+}
+
+// -- fileInfo ---------------------------------------------------------
+
+// FileInfo holds analysis information for the source file view.
+// Clients must not mutate it.
+type FileInfo struct {
+ Data []interface{} // JSON serializable values
+ Links []Link // HTML link markup
+}
+
+// A fileInfo is the server's store of hyperlinks and JSON data for a
+// particular file.
+type fileInfo struct {
+ mu sync.Mutex
+ data []interface{} // JSON objects
+ links []Link
+ sorted bool
+ hasErrors bool // TODO(adonovan): surface this in the UI
+}
+
+// addLink adds a link to the Go source file fi.
+func (fi *fileInfo) addLink(link Link) {
+ fi.mu.Lock()
+ fi.links = append(fi.links, link)
+ fi.sorted = false
+ if _, ok := link.(errorLink); ok {
+ fi.hasErrors = true
+ }
+ fi.mu.Unlock()
+}
+
+// addData adds the structured value x to the JSON data for the Go
+// source file fi. Its index is returned.
+func (fi *fileInfo) addData(x interface{}) int {
+ fi.mu.Lock()
+ index := len(fi.data)
+ fi.data = append(fi.data, x)
+ fi.mu.Unlock()
+ return index
+}
+
+// get returns the file info in external form.
+// Callers must not mutate its fields.
+func (fi *fileInfo) get() FileInfo {
+ var r FileInfo
+ // Copy slices, to avoid races.
+ fi.mu.Lock()
+ r.Data = append(r.Data, fi.data...)
+ if !fi.sorted {
+ sort.Sort(linksByStart(fi.links))
+ fi.sorted = true
+ }
+ r.Links = append(r.Links, fi.links...)
+ fi.mu.Unlock()
+ return r
+}
+
+// PackageInfo holds analysis information for the package view.
+// Clients must not mutate it.
+type PackageInfo struct {
+ CallGraph []*PCGNodeJSON
+ CallGraphIndex map[string]int
+ Types []*TypeInfoJSON
+}
+
+type pkgInfo struct {
+ mu sync.Mutex
+ callGraph []*PCGNodeJSON
+ callGraphIndex map[string]int // keys are (*ssa.Function).RelString()
+ types []*TypeInfoJSON // type info for exported types
+}
+
+func (pi *pkgInfo) setCallGraph(callGraph []*PCGNodeJSON, callGraphIndex map[string]int) {
+ pi.mu.Lock()
+ pi.callGraph = callGraph
+ pi.callGraphIndex = callGraphIndex
+ pi.mu.Unlock()
+}
+
+func (pi *pkgInfo) addType(t *TypeInfoJSON) {
+ pi.mu.Lock()
+ pi.types = append(pi.types, t)
+ pi.mu.Unlock()
+}
+
+// get returns the package info in external form.
+// Callers must not mutate its fields.
+func (pi *pkgInfo) get() PackageInfo {
+ var r PackageInfo
+ // Copy slices, to avoid races.
+ pi.mu.Lock()
+ r.CallGraph = append(r.CallGraph, pi.callGraph...)
+ r.CallGraphIndex = pi.callGraphIndex
+ r.Types = append(r.Types, pi.types...)
+ pi.mu.Unlock()
+ return r
+}
+
+// -- Result -----------------------------------------------------------
+
+// Result contains the results of analysis.
+// The result contains a mapping from filenames to a set of HTML links
+// and JavaScript data referenced by the links.
+type Result struct {
+ mu sync.Mutex // guards maps (but not their contents)
+ status string // global analysis status
+ fileInfos map[string]*fileInfo // keys are godoc file URLs
+ pkgInfos map[string]*pkgInfo // keys are import paths
+}
+
+// fileInfo returns the fileInfo for the specified godoc file URL,
+// constructing it as needed. Thread-safe.
+func (res *Result) fileInfo(url string) *fileInfo {
+ res.mu.Lock()
+ fi, ok := res.fileInfos[url]
+ if !ok {
+ if res.fileInfos == nil {
+ res.fileInfos = make(map[string]*fileInfo)
+ }
+ fi = new(fileInfo)
+ res.fileInfos[url] = fi
+ }
+ res.mu.Unlock()
+ return fi
+}
+
+// Status returns a human-readable description of the current analysis status.
+func (res *Result) Status() string {
+ res.mu.Lock()
+ defer res.mu.Unlock()
+ return res.status
+}
+
+func (res *Result) setStatusf(format string, args ...interface{}) {
+ res.mu.Lock()
+ res.status = fmt.Sprintf(format, args...)
+ log.Printf(format, args...)
+ res.mu.Unlock()
+}
+
+// FileInfo returns new slices containing opaque JSON values and the
+// HTML link markup for the specified godoc file URL. Thread-safe.
+// Callers must not mutate the elements.
+// It returns "zero" if no data is available.
+//
+func (res *Result) FileInfo(url string) (fi FileInfo) {
+ return res.fileInfo(url).get()
+}
+
+// pkgInfo returns the pkgInfo for the specified import path,
+// constructing it as needed. Thread-safe.
+func (res *Result) pkgInfo(importPath string) *pkgInfo {
+ res.mu.Lock()
+ pi, ok := res.pkgInfos[importPath]
+ if !ok {
+ if res.pkgInfos == nil {
+ res.pkgInfos = make(map[string]*pkgInfo)
+ }
+ pi = new(pkgInfo)
+ res.pkgInfos[importPath] = pi
+ }
+ res.mu.Unlock()
+ return pi
+}
+
+// PackageInfo returns new slices of JSON values for the callgraph and
+// type info for the specified package. Thread-safe.
+// Callers must not mutate its fields.
+// PackageInfo returns "zero" if no data is available.
+//
+func (res *Result) PackageInfo(importPath string) PackageInfo {
+ return res.pkgInfo(importPath).get()
+}
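+
+// Editorial sketch (not part of the upstream file): a godoc server would
+// start Run in the background and query the Result at any time; both calls
+// below are safe while analysis is still populating the maps.
+func exampleServerUse(res *Result) {
+ fi := res.FileInfo("/src/fmt/print.go") // HTML links and JSON data for one file
+ pi := res.PackageInfo("fmt") // package call graph and type info
+ _, _ = fi, pi
+}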
+
+// -- analysis ---------------------------------------------------------
+
+type analysis struct {
+ result *Result
+ prog *ssa.Program
+ ops []chanOp // all channel ops in program
+ allNamed []*types.Named // all named types in the program
+ ptaConfig pointer.Config
+ path2url map[string]string // maps openable path to godoc file URL (/src/fmt/print.go)
+ pcgs map[*ssa.Package]*packageCallGraph
+}
+
+// fileAndOffset returns the file and offset for a given pos.
+func (a *analysis) fileAndOffset(pos token.Pos) (fi *fileInfo, offset int) {
+ return a.fileAndOffsetPosn(a.prog.Fset.Position(pos))
+}
+
+// fileAndOffsetPosn returns the file and offset for a given position.
+func (a *analysis) fileAndOffsetPosn(posn token.Position) (fi *fileInfo, offset int) {
+ url := a.path2url[posn.Filename]
+ return a.result.fileInfo(url), posn.Offset
+}
+
+// posURL returns the URL of the source extent [pos, pos+len).
+func (a *analysis) posURL(pos token.Pos, len int) string {
+ if pos == token.NoPos {
+ return ""
+ }
+ posn := a.prog.Fset.Position(pos)
+ url := a.path2url[posn.Filename]
+ return fmt.Sprintf("%s?s=%d:%d#L%d",
+ url, posn.Offset, posn.Offset+len, posn.Line)
+}
+
+// ----------------------------------------------------------------------
+
+// Run runs program analysis and computes the resulting markup,
+// populating *result in a thread-safe manner, first with type
+// information then later with pointer analysis information if
+// enabled by the pta flag.
+//
+func Run(pta bool, result *Result) {
+ conf := loader.Config{
+ SourceImports: true,
+ AllowErrors: true,
+ }
+
+ // Silence the default error handler.
+ // Don't print all errors; we'll report just
+ // one per errant package later.
+ conf.TypeChecker.Error = func(e error) {}
+
+ var roots, args []string // roots[i] ends with os.PathSeparator
+
+ // Enumerate packages in $GOROOT.
+ root := filepath.Join(runtime.GOROOT(), "src") + string(os.PathSeparator)
+ roots = append(roots, root)
+ args = allPackages(root)
+ log.Printf("GOROOT=%s: %s\n", root, args)
+
+ // Enumerate packages in $GOPATH.
+ for i, dir := range filepath.SplitList(build.Default.GOPATH) {
+ root := filepath.Join(dir, "src") + string(os.PathSeparator)
+ roots = append(roots, root)
+ pkgs := allPackages(root)
+ log.Printf("GOPATH[%d]=%s: %s\n", i, root, pkgs)
+ args = append(args, pkgs...)
+ }
+
+ // Uncomment to make startup quicker during debugging.
+ //args = []string{"llvm.org/llgo/third_party/go.tools/cmd/godoc"}
+ //args = []string{"fmt"}
+
+ if _, err := conf.FromArgs(args, true); err != nil {
+ // TODO(adonovan): degrade gracefully, not fail totally.
+ // (The crippling case is a parse error in an external test file.)
+ result.setStatusf("Analysis failed: %s.", err) // import error
+ return
+ }
+
+ result.setStatusf("Loading and type-checking packages...")
+ iprog, err := conf.Load()
+ if iprog != nil {
+ // Report only the first error of each package.
+ for _, info := range iprog.AllPackages {
+ for _, err := range info.Errors {
+ fmt.Fprintln(os.Stderr, err)
+ break
+ }
+ }
+ log.Printf("Loaded %d packages.", len(iprog.AllPackages))
+ }
+ if err != nil {
+ result.setStatusf("Loading failed: %s.\n", err)
+ return
+ }
+
+ // Create SSA-form program representation.
+ // Only the transitively error-free packages are used.
+ prog := ssa.Create(iprog, ssa.GlobalDebug)
+
+ // Compute the set of main packages, including testmain.
+ allPackages := prog.AllPackages()
+ var mainPkgs []*ssa.Package
+ if testmain := prog.CreateTestMainPackage(allPackages...); testmain != nil {
+ mainPkgs = append(mainPkgs, testmain)
+ }
+ for _, pkg := range allPackages {
+ if pkg.Object.Name() == "main" && pkg.Func("main") != nil {
+ mainPkgs = append(mainPkgs, pkg)
+ }
+ }
+ log.Print("Transitively error-free main packages: ", mainPkgs)
+
+ // Build SSA code for bodies of all functions in the whole program.
+ result.setStatusf("Constructing SSA form...")
+ prog.BuildAll()
+ log.Print("SSA construction complete")
+
+ a := analysis{
+ result: result,
+ prog: prog,
+ pcgs: make(map[*ssa.Package]*packageCallGraph),
+ }
+
+ // Build a mapping from openable filenames to godoc file URLs,
+ // i.e. "/src/" plus path relative to GOROOT/src or GOPATH[i]/src.
+ a.path2url = make(map[string]string)
+ for _, info := range iprog.AllPackages {
+ nextfile:
+ for _, f := range info.Files {
+ if f.Pos() == 0 {
+ continue // e.g. files generated by cgo
+ }
+ abs := iprog.Fset.File(f.Pos()).Name()
+ // Find the root to which this file belongs.
+ for _, root := range roots {
+ rel := strings.TrimPrefix(abs, root)
+ if len(rel) < len(abs) {
+ a.path2url[abs] = "/src/" + filepath.ToSlash(rel)
+ continue nextfile
+ }
+ }
+
+ log.Printf("Can't locate file %s (package %q) beneath any root",
+ abs, info.Pkg.Path())
+ }
+ }
+
+ // Add links for scanner, parser, type-checker errors.
+ // TODO(adonovan): fix: these links can overlap with
+ // identifier markup, causing the renderer to emit some
+ // characters twice.
+ errors := make(map[token.Position][]string)
+ for _, info := range iprog.AllPackages {
+ for _, err := range info.Errors {
+ switch err := err.(type) {
+ case types.Error:
+ posn := a.prog.Fset.Position(err.Pos)
+ errors[posn] = append(errors[posn], err.Msg)
+ case scanner.ErrorList:
+ for _, e := range err {
+ errors[e.Pos] = append(errors[e.Pos], e.Msg)
+ }
+ default:
+ log.Printf("Package %q has error (%T) without position: %v\n",
+ info.Pkg.Path(), err, err)
+ }
+ }
+ }
+ for posn, errs := range errors {
+ fi, offset := a.fileAndOffsetPosn(posn)
+ fi.addLink(errorLink{
+ start: offset,
+ msg: strings.Join(errs, "\n"),
+ })
+ }
+
+ // ---------- type-based analyses ----------
+
+ // Compute the all-pairs IMPLEMENTS relation.
+ // Collect all named types, even local types
+ // (which can have methods via promotion)
+ // and the built-in "error".
+ errorType := types.Universe.Lookup("error").Type().(*types.Named)
+ a.allNamed = append(a.allNamed, errorType)
+ for _, info := range iprog.AllPackages {
+ for _, obj := range info.Defs {
+ if obj, ok := obj.(*types.TypeName); ok {
+ a.allNamed = append(a.allNamed, obj.Type().(*types.Named))
+ }
+ }
+ }
+ log.Print("Computing implements relation...")
+ facts := computeImplements(&a.prog.MethodSets, a.allNamed)
+
+ // Add the type-based analysis results.
+ log.Print("Extracting type info...")
+ for _, info := range iprog.AllPackages {
+ a.doTypeInfo(info, facts)
+ }
+
+ a.visitInstrs(pta)
+
+ result.setStatusf("Type analysis complete.")
+
+ if pta {
+ a.pointer(mainPkgs)
+ }
+}
+
+// visitInstrs visits all SSA instructions in the program.
+func (a *analysis) visitInstrs(pta bool) {
+ log.Print("Visit instructions...")
+ for fn := range ssautil.AllFunctions(a.prog) {
+ for _, b := range fn.Blocks {
+ for _, instr := range b.Instrs {
+ // CALLEES (static)
+ // (Dynamic calls require pointer analysis.)
+ //
+ // We use the SSA representation to find the static callee,
+ // since in many cases it does better than the
+ // types.Info.{Refs,Selection} information. For example:
+ //
+ // defer func(){}() // static call to anon function
+ // f := func(){}; f() // static call to anon function
+ // f := fmt.Println; f() // static call to named function
+ //
+ // The downside is that we get no static callee information
+ // for packages that (transitively) contain errors.
+ if site, ok := instr.(ssa.CallInstruction); ok {
+ if callee := site.Common().StaticCallee(); callee != nil {
+ // TODO(adonovan): callgraph: elide wrappers.
+ // (Do static calls ever go to wrappers?)
+ if site.Common().Pos() != token.NoPos {
+ a.addCallees(site, []*ssa.Function{callee})
+ }
+ }
+ }
+
+ if !pta {
+ continue
+ }
+
+ // CHANNEL PEERS
+ // Collect send/receive/close instructions in the whole ssa.Program.
+ for _, op := range chanOps(instr) {
+ a.ops = append(a.ops, op)
+ a.ptaConfig.AddQuery(op.ch) // add channel ssa.Value to PTA query
+ }
+ }
+ }
+ }
+ log.Print("Visit instructions complete")
+}
+
+// pointer runs the pointer analysis.
+func (a *analysis) pointer(mainPkgs []*ssa.Package) {
+ // Run the pointer analysis and build the complete callgraph.
+ a.ptaConfig.Mains = mainPkgs
+ a.ptaConfig.BuildCallGraph = true
+ a.ptaConfig.Reflection = false // (for now)
+
+ a.result.setStatusf("Pointer analysis running...")
+
+ ptares, err := pointer.Analyze(&a.ptaConfig)
+ if err != nil {
+ // If this happens, it indicates a bug.
+ a.result.setStatusf("Pointer analysis failed: %s.", err)
+ return
+ }
+ log.Print("Pointer analysis complete.")
+
+ // Add the results of pointer analysis.
+
+ a.result.setStatusf("Computing channel peers...")
+ a.doChannelPeers(ptares.Queries)
+ a.result.setStatusf("Computing dynamic call graph edges...")
+ a.doCallgraph(ptares.CallGraph)
+
+ a.result.setStatusf("Analysis complete.")
+}
+
+type linksByStart []Link
+
+func (a linksByStart) Less(i, j int) bool { return a[i].Start() < a[j].Start() }
+func (a linksByStart) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a linksByStart) Len() int { return len(a) }
+
+// allPackages returns a new sorted slice of all packages beneath the
+// specified package root directory, e.g. $GOROOT/src or $GOPATH/src.
+// Derived from go/ssa/stdlib_test.go.
+// root must end with os.PathSeparator.
+//
+// TODO(adonovan): use buildutil.AllPackages when the tree thaws.
+func allPackages(root string) []string {
+ var pkgs []string
+ filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
+ if info == nil {
+ return nil // non-existent root directory?
+ }
+ if !info.IsDir() {
+ return nil // not a directory
+ }
+ // Prune the search if we encounter any of these names:
+ base := filepath.Base(path)
+ if base == "testdata" || strings.HasPrefix(base, ".") {
+ return filepath.SkipDir
+ }
+ pkg := filepath.ToSlash(strings.TrimPrefix(path, root))
+ switch pkg {
+ case "builtin":
+ return filepath.SkipDir
+ case "":
+ return nil // ignore root of tree
+ }
+ pkgs = append(pkgs, pkg)
+ return nil
+ })
+ return pkgs
+}
diff --git a/llgo/third_party/go.tools/godoc/analysis/callgraph.go b/llgo/third_party/go.tools/godoc/analysis/callgraph.go
new file mode 100644
index 0000000000000000000000000000000000000000..a20c12fbd1f74997fe978fc74a65914213967de4
--- /dev/null
+++ b/llgo/third_party/go.tools/godoc/analysis/callgraph.go
@@ -0,0 +1,351 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package analysis
+
+// This file computes the CALLERS and CALLEES relations from the call
+// graph. CALLERS/CALLEES information is displayed in the lower pane
+// when a "func" token or ast.CallExpr.Lparen is clicked, respectively.
+
+import (
+ "fmt"
+ "go/ast"
+ "go/token"
+ "log"
+ "math/big"
+ "sort"
+
+ "llvm.org/llgo/third_party/go.tools/go/callgraph"
+ "llvm.org/llgo/third_party/go.tools/go/ssa"
+ "llvm.org/llgo/third_party/go.tools/go/types"
+)
+
+// doCallgraph computes the CALLEES and CALLERS relations.
+func (a *analysis) doCallgraph(cg *callgraph.Graph) {
+ log.Print("Deleting synthetic nodes...")
+ // TODO(adonovan): opt: DeleteSyntheticNodes is asymptotically
+ // inefficient and can be (unpredictably) slow.
+ cg.DeleteSyntheticNodes()
+ log.Print("Synthetic nodes deleted")
+
+ // Populate nodes of package call graphs (PCGs).
+ for _, n := range cg.Nodes {
+ a.pcgAddNode(n.Func)
+ }
+ // Within each PCG, sort funcs by name.
+ for _, pcg := range a.pcgs {
+ pcg.sortNodes()
+ }
+
+ calledFuncs := make(map[ssa.CallInstruction]map[*ssa.Function]bool)
+ callingSites := make(map[*ssa.Function]map[ssa.CallInstruction]bool)
+ for _, n := range cg.Nodes {
+ for _, e := range n.Out {
+ if e.Site == nil {
+ continue // a call from a synthetic node such as <root>
+ }
+
+ // Add (site pos, callee) to calledFuncs.
+ // (Dynamic calls only.)
+ callee := e.Callee.Func
+
+ a.pcgAddEdge(n.Func, callee)
+
+ if callee.Synthetic != "" {
+ continue // call of a package initializer
+ }
+
+ if e.Site.Common().StaticCallee() == nil {
+ // dynamic call
+ // (CALLEES information for static calls
+ // is computed using SSA information.)
+ lparen := e.Site.Common().Pos()
+ if lparen != token.NoPos {
+ fns := calledFuncs[e.Site]
+ if fns == nil {
+ fns = make(map[*ssa.Function]bool)
+ calledFuncs[e.Site] = fns
+ }
+ fns[callee] = true
+ }
+ }
+
+ // Add (callee, site) to callingSites.
+ fns := callingSites[callee]
+ if fns == nil {
+ fns = make(map[ssa.CallInstruction]bool)
+ callingSites[callee] = fns
+ }
+ fns[e.Site] = true
+ }
+ }
+
+ // CALLEES.
+ log.Print("Callees...")
+ for site, fns := range calledFuncs {
+ var funcs funcsByPos
+ for fn := range fns {
+ funcs = append(funcs, fn)
+ }
+ sort.Sort(funcs)
+
+ a.addCallees(site, funcs)
+ }
+
+ // CALLERS
+ log.Print("Callers...")
+ for callee, sites := range callingSites {
+ pos := funcToken(callee)
+ if pos == token.NoPos {
+ log.Printf("CALLERS: skipping %s: no pos", callee)
+ continue
+ }
+
+ var this *types.Package // for relativizing names
+ if callee.Pkg != nil {
+ this = callee.Pkg.Object
+ }
+
+ // Compute sites grouped by parent, with text and URLs.
+ sitesByParent := make(map[*ssa.Function]sitesByPos)
+ for site := range sites {
+ fn := site.Parent()
+ sitesByParent[fn] = append(sitesByParent[fn], site)
+ }
+ var funcs funcsByPos
+ for fn := range sitesByParent {
+ funcs = append(funcs, fn)
+ }
+ sort.Sort(funcs)
+
+ v := callersJSON{
+ Callee: callee.String(),
+ Callers: []callerJSON{}, // (JS wants non-nil)
+ }
+ for _, fn := range funcs {
+ caller := callerJSON{
+ Func: prettyFunc(this, fn),
+ Sites: []anchorJSON{}, // (JS wants non-nil)
+ }
+ sites := sitesByParent[fn]
+ sort.Sort(sites)
+ for _, site := range sites {
+ pos := site.Common().Pos()
+ if pos != token.NoPos {
+ caller.Sites = append(caller.Sites, anchorJSON{
+ Text: fmt.Sprintf("%d", a.prog.Fset.Position(pos).Line),
+ Href: a.posURL(pos, len("(")),
+ })
+ }
+ }
+ v.Callers = append(v.Callers, caller)
+ }
+
+ fi, offset := a.fileAndOffset(pos)
+ fi.addLink(aLink{
+ start: offset,
+ end: offset + len("func"),
+ title: fmt.Sprintf("%d callers", len(sites)),
+ onclick: fmt.Sprintf("onClickCallers(%d)", fi.addData(v)),
+ })
+ }
+
+ // PACKAGE CALLGRAPH
+ log.Print("Package call graph...")
+ for pkg, pcg := range a.pcgs {
+ // Maps (*ssa.Function).RelString() to index in JSON CALLGRAPH array.
+ index := make(map[string]int)
+
+ // Treat exported functions (and exported methods of
+ // exported named types) as roots even if they aren't
+ // actually called from outside the package.
+ for i, n := range pcg.nodes {
+ if i == 0 || n.fn.Object() == nil || !n.fn.Object().Exported() {
+ continue
+ }
+ recv := n.fn.Signature.Recv()
+ if recv == nil || deref(recv.Type()).(*types.Named).Obj().Exported() {
+ roots := &pcg.nodes[0].edges
+ roots.SetBit(roots, i, 1)
+ }
+ index[n.fn.RelString(pkg.Object)] = i
+ }
+
+ json := a.pcgJSON(pcg)
+
+ // TODO(adonovan): pkg.Path() is not unique!
+ // It is possible to declare a non-test package called x_test.
+ a.result.pkgInfo(pkg.Object.Path()).setCallGraph(json, index)
+ }
+}
+
+// addCallees adds client data and links for the facts that site calls fns.
+func (a *analysis) addCallees(site ssa.CallInstruction, fns []*ssa.Function) {
+ v := calleesJSON{
+ Descr: site.Common().Description(),
+ Callees: []anchorJSON{}, // (JS wants non-nil)
+ }
+ var this *types.Package // for relativizing names
+ if p := site.Parent().Package(); p != nil {
+ this = p.Object
+ }
+
+ for _, fn := range fns {
+ v.Callees = append(v.Callees, anchorJSON{
+ Text: prettyFunc(this, fn),
+ Href: a.posURL(funcToken(fn), len("func")),
+ })
+ }
+
+ fi, offset := a.fileAndOffset(site.Common().Pos())
+ fi.addLink(aLink{
+ start: offset,
+ end: offset + len("("),
+ title: fmt.Sprintf("%d callees", len(v.Callees)),
+ onclick: fmt.Sprintf("onClickCallees(%d)", fi.addData(v)),
+ })
+}
+
+// -- utilities --------------------------------------------------------
+
+// stable order within packages but undefined across packages.
+type funcsByPos []*ssa.Function
+
+func (a funcsByPos) Less(i, j int) bool { return a[i].Pos() < a[j].Pos() }
+func (a funcsByPos) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a funcsByPos) Len() int { return len(a) }
+
+type sitesByPos []ssa.CallInstruction
+
+func (a sitesByPos) Less(i, j int) bool { return a[i].Common().Pos() < a[j].Common().Pos() }
+func (a sitesByPos) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a sitesByPos) Len() int { return len(a) }
+
+func funcToken(fn *ssa.Function) token.Pos {
+ switch syntax := fn.Syntax().(type) {
+ case *ast.FuncLit:
+ return syntax.Type.Func
+ case *ast.FuncDecl:
+ return syntax.Type.Func
+ }
+ return token.NoPos
+}
+
+// prettyFunc pretty-prints fn for the user interface.
+// TODO(adonovan): return HTML so we have more markup freedom.
+func prettyFunc(this *types.Package, fn *ssa.Function) string {
+ if fn.Parent() != nil {
+ return fmt.Sprintf("%s in %s",
+ types.TypeString(this, fn.Signature),
+ prettyFunc(this, fn.Parent()))
+ }
+ if fn.Synthetic != "" && fn.Name() == "init" {
+ // (This is the actual initializer, not a declared 'func init').
+ if fn.Pkg.Object == this {
+ return "package initializer"
+ }
+ return fmt.Sprintf("%q package initializer", fn.Pkg.Object.Path())
+ }
+ return fn.RelString(this)
+}
+
+// -- intra-package callgraph ------------------------------------------
+
+// pcgNode represents a node in the package call graph (PCG).
+type pcgNode struct {
+ fn *ssa.Function
+ pretty string // cache of prettyFunc(fn)
+ edges big.Int // set of callee func indices
+}
+
+// A packageCallGraph represents the intra-package edges of the global call graph.
+// The zeroth node indicates "all external functions".
+type packageCallGraph struct {
+ nodeIndex map[*ssa.Function]int // maps func to node index (a small int)
+ nodes []*pcgNode // maps node index to node
+}
+
+// sortNodes populates pcg.nodes in name order and updates the nodeIndex.
+func (pcg *packageCallGraph) sortNodes() {
+ nodes := make([]*pcgNode, 0, len(pcg.nodeIndex))
+ nodes = append(nodes, &pcgNode{fn: nil, pretty: "<external>"})
+ for fn := range pcg.nodeIndex {
+ nodes = append(nodes, &pcgNode{
+ fn: fn,
+ pretty: prettyFunc(fn.Pkg.Object, fn),
+ })
+ }
+ sort.Sort(pcgNodesByPretty(nodes[1:]))
+ for i, n := range nodes {
+ pcg.nodeIndex[n.fn] = i
+ }
+ pcg.nodes = nodes
+}
+
+func (pcg *packageCallGraph) addEdge(caller, callee *ssa.Function) {
+ var callerIndex int
+ if caller.Pkg == callee.Pkg {
+ // intra-package edge
+ callerIndex = pcg.nodeIndex[caller]
+ if callerIndex < 1 {
+ panic(caller)
+ }
+ }
+ edges := &pcg.nodes[callerIndex].edges
+ edges.SetBit(edges, pcg.nodeIndex[callee], 1)
+}
+
+func (a *analysis) pcgAddNode(fn *ssa.Function) {
+ if fn.Pkg == nil {
+ return
+ }
+ pcg, ok := a.pcgs[fn.Pkg]
+ if !ok {
+ pcg = &packageCallGraph{nodeIndex: make(map[*ssa.Function]int)}
+ a.pcgs[fn.Pkg] = pcg
+ }
+ pcg.nodeIndex[fn] = -1
+}
+
+func (a *analysis) pcgAddEdge(caller, callee *ssa.Function) {
+ if callee.Pkg != nil {
+ a.pcgs[callee.Pkg].addEdge(caller, callee)
+ }
+}
+
+// pcgJSON returns a new slice of callgraph JSON values.
+func (a *analysis) pcgJSON(pcg *packageCallGraph) []*PCGNodeJSON {
+ var nodes []*PCGNodeJSON
+ for _, n := range pcg.nodes {
+
+ // TODO(adonovan): why is there no good way to iterate
+ // over the set bits of a big.Int?
+ var callees []int
+ nbits := n.edges.BitLen()
+ for j := 0; j < nbits; j++ {
+ if n.edges.Bit(j) == 1 {
+ callees = append(callees, j)
+ }
+ }
+
+ var pos token.Pos
+ if n.fn != nil {
+ pos = funcToken(n.fn)
+ }
+ nodes = append(nodes, &PCGNodeJSON{
+ Func: anchorJSON{
+ Text: n.pretty,
+ Href: a.posURL(pos, len("func")),
+ },
+ Callees: callees,
+ })
+ }
+ return nodes
+}
+
+type pcgNodesByPretty []*pcgNode
+
+func (a pcgNodesByPretty) Less(i, j int) bool { return a[i].pretty < a[j].pretty }
+func (a pcgNodesByPretty) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a pcgNodesByPretty) Len() int { return len(a) }
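
The package call graph above stores each node's callee set in a math/big.Int: addEdge sets one bit per callee index, and pcgJSON later scans BitLen() bits to recover the indices (the TODO above notes there is no direct iterator). A standalone sketch of that encoding, with invented indices:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// Set bits the way packageCallGraph.addEdge does:
	// edges.SetBit(edges, calleeIndex, 1).
	var edges big.Int
	for _, callee := range []int{3, 5, 11} {
		edges.SetBit(&edges, callee, 1)
	}

	// Recover the indices the way pcgJSON does, by scanning up to BitLen().
	var callees []int
	for j := 0; j < edges.BitLen(); j++ {
		if edges.Bit(j) == 1 {
			callees = append(callees, j)
		}
	}
	fmt.Println(callees) // [3 5 11]
}
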
diff --git a/llgo/third_party/go.tools/godoc/analysis/implements.go b/llgo/third_party/go.tools/godoc/analysis/implements.go
new file mode 100644
index 0000000000000000000000000000000000000000..f8e2473dcd9b394f998d9a9ace624b307937b399
--- /dev/null
+++ b/llgo/third_party/go.tools/godoc/analysis/implements.go
@@ -0,0 +1,194 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package analysis
+
+// This file computes the "implements" relation over all pairs of
+// named types in the program. (The mark-up is done by typeinfo.go.)
+
+// TODO(adonovan): do we want to report implements(C, I) where C and I
+// belong to different packages and at least one is not exported?
+
+import (
+ "sort"
+
+ "llvm.org/llgo/third_party/go.tools/go/types"
+)
+
+// computeImplements computes the "implements" relation over all pairs
+// of named types in allNamed.
+func computeImplements(cache *types.MethodSetCache, allNamed []*types.Named) map[*types.Named]implementsFacts {
+ // Information about a single type's method set.
+ type msetInfo struct {
+ typ types.Type
+ mset *types.MethodSet
+ mask1, mask2 uint64
+ }
+
+ initMsetInfo := func(info *msetInfo, typ types.Type) {
+ info.typ = typ
+ info.mset = cache.MethodSet(typ)
+ for i := 0; i < info.mset.Len(); i++ {
+ name := info.mset.At(i).Obj().Name()
+ info.mask1 |= 1 << methodBit(name[0])
+ info.mask2 |= 1 << methodBit(name[len(name)-1])
+ }
+ }
+
+ // satisfies(T, U) reports whether type T satisfies type U.
+ // U must be an interface.
+ //
+ // Since there are thousands of types (and thus millions of
+ // pairs of types) and types.AssignableTo(T, U) is relatively
+ // expensive, we compute assignability directly from the
+ // method sets. (At least one of T and U must be an
+ // interface.)
+ //
+ // We use a trick (thanks gri!) related to a Bloom filter to
+ // quickly reject most tests, which are false. For each
+ // method set, we precompute a mask, a set of bits, one per
+ // distinct initial byte of each method name. Thus the mask
+ // for io.ReadWriter would be {'R','W'}. AssignableTo(T, U)
+ // cannot be true unless mask(T)&mask(U)==mask(U).
+ //
+ // As with a Bloom filter, we can improve precision by testing
+ // additional hashes, e.g. using the last letter of each
+ // method name, so long as the subset mask property holds.
+ //
+ // When analyzing the standard library, there are about 1e6
+ // calls to satisfies(), of which 0.6% return true. With a
+ // 1-hash filter, 95% of calls avoid the expensive check; with
+ // a 2-hash filter, this grows to 98.2%.
+ satisfies := func(T, U *msetInfo) bool {
+ return T.mask1&U.mask1 == U.mask1 &&
+ T.mask2&U.mask2 == U.mask2 &&
+ containsAllIdsOf(T.mset, U.mset)
+ }
+
+ // Information about a named type N, and perhaps also *N.
+ type namedInfo struct {
+ isInterface bool
+ base msetInfo // N
+ ptr msetInfo // *N, iff N !isInterface
+ }
+
+ var infos []namedInfo
+
+ // Precompute the method sets and their masks.
+ for _, N := range allNamed {
+ var info namedInfo
+ initMsetInfo(&info.base, N)
+ _, info.isInterface = N.Underlying().(*types.Interface)
+ if !info.isInterface {
+ initMsetInfo(&info.ptr, types.NewPointer(N))
+ }
+
+ if info.base.mask1|info.ptr.mask1 == 0 {
+ continue // neither N nor *N has methods
+ }
+
+ infos = append(infos, info)
+ }
+
+ facts := make(map[*types.Named]implementsFacts)
+
+ // Test all pairs of distinct named types (T, U).
+ // TODO(adonovan): opt: compute (U, T) at the same time.
+ for t := range infos {
+ T := &infos[t]
+ var to, from, fromPtr []types.Type
+ for u := range infos {
+ if t == u {
+ continue
+ }
+ U := &infos[u]
+ switch {
+ case T.isInterface && U.isInterface:
+ if satisfies(&U.base, &T.base) {
+ to = append(to, U.base.typ)
+ }
+ if satisfies(&T.base, &U.base) {
+ from = append(from, U.base.typ)
+ }
+ case T.isInterface: // U concrete
+ if satisfies(&U.base, &T.base) {
+ to = append(to, U.base.typ)
+ } else if satisfies(&U.ptr, &T.base) {
+ to = append(to, U.ptr.typ)
+ }
+ case U.isInterface: // T concrete
+ if satisfies(&T.base, &U.base) {
+ from = append(from, U.base.typ)
+ } else if satisfies(&T.ptr, &U.base) {
+ fromPtr = append(fromPtr, U.base.typ)
+ }
+ }
+ }
+
+ // Sort types (arbitrarily) to avoid nondeterminism.
+ sort.Sort(typesByString(to))
+ sort.Sort(typesByString(from))
+ sort.Sort(typesByString(fromPtr))
+
+ facts[T.base.typ.(*types.Named)] = implementsFacts{to, from, fromPtr}
+ }
+
+ return facts
+}
+
+type implementsFacts struct {
+ to []types.Type // named or ptr-to-named types assignable to interface T
+ from []types.Type // named interfaces assignable from T
+ fromPtr []types.Type // named interfaces assignable only from *T
+}
+
+type typesByString []types.Type
+
+func (p typesByString) Len() int { return len(p) }
+func (p typesByString) Less(i, j int) bool { return p[i].String() < p[j].String() }
+func (p typesByString) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+// methodBit returns the index of x in [a-zA-Z], or 52 if not found.
+func methodBit(x byte) uint64 {
+ switch {
+ case 'a' <= x && x <= 'z':
+ return uint64(x - 'a')
+ case 'A' <= x && x <= 'Z':
+ return uint64(26 + x - 'A')
+ }
+ return 52 // all other bytes
+}
+
+// containsAllIdsOf reports whether the method identifiers of T are a
+// superset of those in U. If U belongs to an interface type, the
+// result is equal to types.AssignableTo(T, U), but is cheaper to compute.
+//
+// TODO(gri): make this a method of *types.MethodSet.
+//
+func containsAllIdsOf(T, U *types.MethodSet) bool {
+ t, tlen := 0, T.Len()
+ u, ulen := 0, U.Len()
+ for t < tlen && u < ulen {
+ tMeth := T.At(t).Obj()
+ uMeth := U.At(u).Obj()
+ tId := tMeth.Id()
+ uId := uMeth.Id()
+ if tId > uId {
+ // U has a method T lacks: fail.
+ return false
+ }
+ if tId < uId {
+ // T has a method U lacks: ignore it.
+ t++
+ continue
+ }
+ // U and T both have a method of this Id. Check types.
+ if !types.Identical(tMeth.Type(), uMeth.Type()) {
+ return false // type mismatch
+ }
+ u++
+ t++
+ }
+ return u == ulen
+}
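
The mask trick described in the comments above is easiest to see with concrete values. A standalone sketch of the first-letter mask computed by initMsetInfo, reusing methodBit and the io.ReadWriter example from the comment (the method lists are illustrative):

package main

import "fmt"

// methodBit mirrors the helper above: index of x in [a-zA-Z], else 52.
func methodBit(x byte) uint64 {
	switch {
	case 'a' <= x && x <= 'z':
		return uint64(x - 'a')
	case 'A' <= x && x <= 'Z':
		return uint64(26 + x - 'A')
	}
	return 52
}

// mask computes the first-letter mask of a method set, as initMsetInfo does.
func mask(methods []string) uint64 {
	var m uint64
	for _, name := range methods {
		m |= 1 << methodBit(name[0])
	}
	return m
}

func main() {
	readWriter := mask([]string{"Read", "Write"}) // bits {'R','W'}
	onlyRead := mask([]string{"Read", "Close"})
	seeker := mask([]string{"Read", "Write", "Seek"})

	// Assignability cannot hold unless mask(T)&mask(U) == mask(U).
	fmt.Println(readWriter&onlyRead == readWriter) // false: rejected by the filter
	fmt.Println(readWriter&seeker == readWriter)   // true: passes; full check still needed
}
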
diff --git a/llgo/third_party/go.tools/godoc/analysis/json.go b/llgo/third_party/go.tools/godoc/analysis/json.go
new file mode 100644
index 0000000000000000000000000000000000000000..f8976187c2cc4ecd46be4fd2ea55f2be353ffce4
--- /dev/null
+++ b/llgo/third_party/go.tools/godoc/analysis/json.go
@@ -0,0 +1,69 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package analysis
+
+// This file defines types used by client-side JavaScript.
+
+type anchorJSON struct {
+ Text string // HTML
+ Href string // URL
+}
+
+type commOpJSON struct {
+ Op anchorJSON
+ Fn string
+}
+
+// JavaScript's onClickComm() expects a commJSON.
+type commJSON struct {
+ Ops []commOpJSON
+}
+
+// Indicates one of these forms of fact about a type T:
+// T "is implemented by type " (ByKind != "", e.g. "array")
+// T "implements " (ByKind == "")
+type implFactJSON struct {
+ ByKind string `json:",omitempty"`
+ Other anchorJSON
+}
+
+// Implements facts are grouped by form, for ease of reading.
+type implGroupJSON struct {
+ Descr string
+ Facts []implFactJSON
+}
+
+// JavaScript's onClickIdent() expects a TypeInfoJSON.
+type TypeInfoJSON struct {
+ Name string // type name
+ Size, Align int64
+ Methods []anchorJSON
+ ImplGroups []implGroupJSON
+}
+
+// JavaScript's onClickCallees() expects a calleesJSON.
+type calleesJSON struct {
+ Descr string
+ Callees []anchorJSON // markup for called function
+}
+
+type callerJSON struct {
+ Func string
+ Sites []anchorJSON
+}
+
+// JavaScript's onClickCallers() expects a callersJSON.
+type callersJSON struct {
+ Callee string
+ Callers []callerJSON
+}
+
+// JavaScript's cgAddChild requires a global array of PCGNodeJSON
+// called CALLGRAPH, representing the intra-package call graph.
+// The first element is special and represents "all external callers".
+type PCGNodeJSON struct {
+ Func anchorJSON
+ Callees []int // indices within CALLGRAPH of nodes called by this one
+}
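
These structs are what the server serializes (via encoding/json) for the client-side scripts named in the comments. A standalone sketch of the resulting wire format for a calleesJSON value; the types are duplicated here because the originals are unexported, and the Descr/Href strings are invented:

package main

import (
	"encoding/json"
	"log"
	"os"
)

// Local mirrors of anchorJSON and calleesJSON above, for illustration only.
type anchorJSON struct {
	Text string // HTML
	Href string // URL
}

type calleesJSON struct {
	Descr   string
	Callees []anchorJSON
}

func main() {
	v := calleesJSON{
		Descr:   "static function call", // invented example values
		Callees: []anchorJSON{{Text: "fmt.Println", Href: "#L273"}},
	}
	enc := json.NewEncoder(os.Stdout)
	enc.SetIndent("", "  ")
	if err := enc.Encode(&v); err != nil {
		log.Fatal(err)
	}
}
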
diff --git a/llgo/third_party/go.tools/godoc/analysis/peers.go b/llgo/third_party/go.tools/godoc/analysis/peers.go
new file mode 100644
index 0000000000000000000000000000000000000000..ee4d5389dde88225f5febe730d56d447d620911b
--- /dev/null
+++ b/llgo/third_party/go.tools/godoc/analysis/peers.go
@@ -0,0 +1,154 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package analysis
+
+// This file computes the channel "peers" relation over all pairs of
+// channel operations in the program. The peers are displayed in the
+// lower pane when a channel operation (make, <-, close) is clicked.
+
+// TODO(adonovan): handle calls to reflect.{Select,Recv,Send,Close} too,
+// then enable reflection in PTA.
+
+import (
+ "fmt"
+ "go/token"
+
+ "llvm.org/llgo/third_party/go.tools/go/pointer"
+ "llvm.org/llgo/third_party/go.tools/go/ssa"
+ "llvm.org/llgo/third_party/go.tools/go/types"
+)
+
+func (a *analysis) doChannelPeers(ptsets map[ssa.Value]pointer.Pointer) {
+ addSendRecv := func(j *commJSON, op chanOp) {
+ j.Ops = append(j.Ops, commOpJSON{
+ Op: anchorJSON{
+ Text: op.mode,
+ Href: a.posURL(op.pos, op.len),
+ },
+ Fn: prettyFunc(nil, op.fn),
+ })
+ }
+
+ // Build an undirected bipartite multigraph (binary relation)
+ // of MakeChan ops and send/recv/close ops.
+ //
+ // TODO(adonovan): opt: use channel element types to partition
+ // the O(n^2) problem into subproblems.
+ aliasedOps := make(map[*ssa.MakeChan][]chanOp)
+ opToMakes := make(map[chanOp][]*ssa.MakeChan)
+ for _, op := range a.ops {
+ // Combine the PT sets from all contexts.
+ var makes []*ssa.MakeChan // aliased ops
+ ptr, ok := ptsets[op.ch]
+ if !ok {
+ continue // e.g. channel op in dead code
+ }
+ for _, label := range ptr.PointsTo().Labels() {
+ makechan, ok := label.Value().(*ssa.MakeChan)
+ if !ok {
+ continue // skip intrinsically-created channels for now
+ }
+ if makechan.Pos() == token.NoPos {
+ continue // not possible?
+ }
+ makes = append(makes, makechan)
+ aliasedOps[makechan] = append(aliasedOps[makechan], op)
+ }
+ opToMakes[op] = makes
+ }
+
+ // Now that the complete relation is built, build links for ops.
+ for _, op := range a.ops {
+ v := commJSON{
+ Ops: []commOpJSON{}, // (JS wants non-nil)
+ }
+ ops := make(map[chanOp]bool)
+ for _, makechan := range opToMakes[op] {
+ v.Ops = append(v.Ops, commOpJSON{
+ Op: anchorJSON{
+ Text: "made",
+ Href: a.posURL(makechan.Pos()-token.Pos(len("make")),
+ len("make")),
+ },
+ Fn: makechan.Parent().RelString(op.fn.Package().Object),
+ })
+ for _, op := range aliasedOps[makechan] {
+ ops[op] = true
+ }
+ }
+ for op := range ops {
+ addSendRecv(&v, op)
+ }
+
+ // Add links for each aliased op.
+ fi, offset := a.fileAndOffset(op.pos)
+ fi.addLink(aLink{
+ start: offset,
+ end: offset + op.len,
+ title: "show channel ops",
+ onclick: fmt.Sprintf("onClickComm(%d)", fi.addData(v)),
+ })
+ }
+ // Add links for makechan ops themselves.
+ for makechan, ops := range aliasedOps {
+ v := commJSON{
+ Ops: []commOpJSON{}, // (JS wants non-nil)
+ }
+ for _, op := range ops {
+ addSendRecv(&v, op)
+ }
+
+ fi, offset := a.fileAndOffset(makechan.Pos())
+ fi.addLink(aLink{
+ start: offset - len("make"),
+ end: offset,
+ title: "show channel ops",
+ onclick: fmt.Sprintf("onClickComm(%d)", fi.addData(v)),
+ })
+ }
+}
+
+// -- utilities --------------------------------------------------------
+
+// chanOp abstracts an ssa.Send, ssa.Unop(ARROW), close(), or a SelectState.
+// Derived from oracle/peers.go.
+type chanOp struct {
+ ch ssa.Value
+ mode string // sent|received|closed
+ pos token.Pos
+ len int
+ fn *ssa.Function
+}
+
+// chanOps returns a slice of all the channel operations in the instruction.
+// Derived from oracle/peers.go.
+func chanOps(instr ssa.Instruction) []chanOp {
+ fn := instr.Parent()
+ var ops []chanOp
+ switch instr := instr.(type) {
+ case *ssa.UnOp:
+ if instr.Op == token.ARROW {
+ // TODO(adonovan): don't assume <-ch; could be 'range ch'.
+ ops = append(ops, chanOp{instr.X, "received", instr.Pos(), len("<-"), fn})
+ }
+ case *ssa.Send:
+ ops = append(ops, chanOp{instr.Chan, "sent", instr.Pos(), len("<-"), fn})
+ case *ssa.Select:
+ for _, st := range instr.States {
+ mode := "received"
+ if st.Dir == types.SendOnly {
+ mode = "sent"
+ }
+ ops = append(ops, chanOp{st.Chan, mode, st.Pos, len("<-"), fn})
+ }
+ case ssa.CallInstruction:
+ call := instr.Common()
+ if blt, ok := call.Value.(*ssa.Builtin); ok && blt.Name() == "close" {
+ pos := instr.Common().Pos()
+ ops = append(ops, chanOp{call.Args[0], "closed", pos - token.Pos(len("close")), len("close("), fn})
+ }
+ }
+ return ops
+}
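
aliasedOps and opToMakes above form an undirected bipartite relation between make(chan) sites and channel operations; an operation's peers are every operation that shares at least one make site with it. A toy sketch of that grouping, with invented site names and no SSA or pointer analysis:

package main

import "fmt"

func main() {
	type op string
	// Stand-in for the points-to sets: each op lists the make sites it may alias.
	alias := map[op][]string{
		"send@a.go:10":  {"make@a.go:3"},
		"recv@b.go:7":   {"make@a.go:3", "make@b.go:2"},
		"close@b.go:20": {"make@b.go:2"},
	}

	// Build both directions of the relation, as doChannelPeers does.
	aliasedOps := make(map[string][]op) // make site -> ops
	opToMakes := make(map[op][]string)  // op -> make sites
	for o, makes := range alias {
		opToMakes[o] = makes
		for _, m := range makes {
			aliasedOps[m] = append(aliasedOps[m], o)
		}
	}

	// The peers of an op are all ops aliased to any of its make sites.
	peers := make(map[op]bool)
	for _, m := range opToMakes["recv@b.go:7"] {
		for _, o := range aliasedOps[m] {
			peers[o] = true
		}
	}
	fmt.Println(len(peers), "peer ops for recv@b.go:7") // 3
}
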
diff --git a/llgo/third_party/go.tools/godoc/analysis/typeinfo.go b/llgo/third_party/go.tools/godoc/analysis/typeinfo.go
new file mode 100644
index 0000000000000000000000000000000000000000..a997e04234277998b03b3654b3fd2928e4602ac9
--- /dev/null
+++ b/llgo/third_party/go.tools/godoc/analysis/typeinfo.go
@@ -0,0 +1,234 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package analysis
+
+// This file computes the markup for information from go/types:
+// IMPORTS, identifier RESOLUTION, METHOD SETS, size/alignment, and
+// the IMPLEMENTS relation.
+//
+// IMPORTS links connect import specs to the documentation for the
+// imported package.
+//
+// RESOLUTION links referring identifiers to their defining
+// identifier, and adds tooltips for kind and type.
+//
+// METHOD SETS, size/alignment, and the IMPLEMENTS relation are
+// displayed in the lower pane when a type's defining identifier is
+// clicked.
+
+import (
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+
+ "llvm.org/llgo/third_party/go.tools/go/loader"
+ "llvm.org/llgo/third_party/go.tools/go/types"
+ "llvm.org/llgo/third_party/go.tools/go/types/typeutil"
+)
+
+// TODO(adonovan): audit to make sure it's safe on ill-typed packages.
+
+// TODO(adonovan): use same Sizes as loader.Config.
+var sizes = types.StdSizes{8, 8}
+
+func (a *analysis) doTypeInfo(info *loader.PackageInfo, implements map[*types.Named]implementsFacts) {
+ // We must not assume the corresponding SSA packages were
+ // created (i.e. were transitively error-free).
+
+ // IMPORTS
+ for _, f := range info.Files {
+ // Package decl.
+ fi, offset := a.fileAndOffset(f.Name.Pos())
+ fi.addLink(aLink{
+ start: offset,
+ end: offset + len(f.Name.Name),
+ title: "Package docs for " + info.Pkg.Path(),
+ // TODO(adonovan): fix: we're putting the untrusted Path()
+ // into a trusted field. What's the appropriate sanitizer?
+ href: "/pkg/" + info.Pkg.Path(),
+ })
+
+ // Import specs.
+ for _, imp := range f.Imports {
+ // Remove quotes.
+ L := int(imp.End()-imp.Path.Pos()) - len(`""`)
+ path, _ := strconv.Unquote(imp.Path.Value)
+ fi, offset := a.fileAndOffset(imp.Path.Pos())
+ fi.addLink(aLink{
+ start: offset + 1,
+ end: offset + 1 + L,
+ title: "Package docs for " + path,
+ // TODO(adonovan): fix: we're putting the untrusted path
+ // into a trusted field. What's the appropriate sanitizer?
+ href: "/pkg/" + path,
+ })
+ }
+ }
+
+ // RESOLUTION
+ for id, obj := range info.Uses {
+ // Position of the object definition.
+ pos := obj.Pos()
+ Len := len(obj.Name())
+
+ // Correct the position for non-renaming import specs.
+ // import "sync/atomic"
+ // ^^^^^^^^^^^
+ if obj, ok := obj.(*types.PkgName); ok && id.Name == obj.Imported().Name() {
+ // Assume this is a non-renaming import.
+ // NB: not true for degenerate renamings: `import foo "foo"`.
+ pos++
+ Len = len(obj.Imported().Path())
+ }
+
+ if obj.Pkg() == nil {
+ continue // don't mark up built-ins.
+ }
+
+ fi, offset := a.fileAndOffset(id.NamePos)
+ fi.addLink(aLink{
+ start: offset,
+ end: offset + len(id.Name),
+ title: types.ObjectString(info.Pkg, obj),
+ href: a.posURL(pos, Len),
+ })
+ }
+
+ // IMPLEMENTS & METHOD SETS
+ for _, obj := range info.Defs {
+ if obj, ok := obj.(*types.TypeName); ok {
+ a.namedType(obj, implements)
+ }
+ }
+}
+
+func (a *analysis) namedType(obj *types.TypeName, implements map[*types.Named]implementsFacts) {
+ this := obj.Pkg()
+ T := obj.Type().(*types.Named)
+ v := &TypeInfoJSON{
+ Name: obj.Name(),
+ Size: sizes.Sizeof(T),
+ Align: sizes.Alignof(T),
+ Methods: []anchorJSON{}, // (JS wants non-nil)
+ }
+
+ // addFact adds the fact "is implemented by T" (by) or
+ // "implements T" (!by) to group.
+ addFact := func(group *implGroupJSON, T types.Type, by bool) {
+ Tobj := deref(T).(*types.Named).Obj()
+ var byKind string
+ if by {
+ // Show underlying kind of implementing type,
+ // e.g. "slice", "array", "struct".
+ s := reflect.TypeOf(T.Underlying()).String()
+ byKind = strings.ToLower(strings.TrimPrefix(s, "*types."))
+ }
+ group.Facts = append(group.Facts, implFactJSON{
+ ByKind: byKind,
+ Other: anchorJSON{
+ Href: a.posURL(Tobj.Pos(), len(Tobj.Name())),
+ Text: types.TypeString(this, T),
+ },
+ })
+ }
+
+ // IMPLEMENTS
+ if r, ok := implements[T]; ok {
+ if isInterface(T) {
+ // "T is implemented by " ...
+ // "T is implemented by "...
+ // "T implements "...
+ group := implGroupJSON{
+ Descr: types.TypeString(this, T),
+ }
+ // Show concrete types first; use two passes.
+ for _, sub := range r.to {
+ if !isInterface(sub) {
+ addFact(&group, sub, true)
+ }
+ }
+ for _, sub := range r.to {
+ if isInterface(sub) {
+ addFact(&group, sub, true)
+ }
+ }
+ for _, super := range r.from {
+ addFact(&group, super, false)
+ }
+ v.ImplGroups = append(v.ImplGroups, group)
+ } else {
+ // T is concrete.
+ if r.from != nil {
+ // "T implements "...
+ group := implGroupJSON{
+ Descr: types.TypeString(this, T),
+ }
+ for _, super := range r.from {
+ addFact(&group, super, false)
+ }
+ v.ImplGroups = append(v.ImplGroups, group)
+ }
+ if r.fromPtr != nil {
+ // "*C implements "...
+ group := implGroupJSON{
+ Descr: "*" + types.TypeString(this, T),
+ }
+ for _, psuper := range r.fromPtr {
+ addFact(&group, psuper, false)
+ }
+ v.ImplGroups = append(v.ImplGroups, group)
+ }
+ }
+ }
+
+ // METHOD SETS
+ for _, sel := range typeutil.IntuitiveMethodSet(T, &a.prog.MethodSets) {
+ meth := sel.Obj().(*types.Func)
+ pos := meth.Pos() // may be 0 for error.Error
+ v.Methods = append(v.Methods, anchorJSON{
+ Href: a.posURL(pos, len(meth.Name())),
+ Text: types.SelectionString(this, sel),
+ })
+ }
+
+ // Since there can be many specs per decl, we
+ // can't attach the link to the keyword 'type'
+ // (as we do with 'func'); we use the Ident.
+ fi, offset := a.fileAndOffset(obj.Pos())
+ fi.addLink(aLink{
+ start: offset,
+ end: offset + len(obj.Name()),
+ title: fmt.Sprintf("type info for %s", obj.Name()),
+ onclick: fmt.Sprintf("onClickTypeInfo(%d)", fi.addData(v)),
+ })
+
+ // Add info for exported package-level types to the package info.
+ if obj.Exported() && isPackageLevel(obj) {
+ // TODO(adonovan): this.Path() is not unique!
+ // It is possible to declare a non-test package called x_test.
+ a.result.pkgInfo(this.Path()).addType(v)
+ }
+}
+
+// -- utilities --------------------------------------------------------
+
+func isInterface(T types.Type) bool {
+ _, isI := T.Underlying().(*types.Interface)
+ return isI
+}
+
+// deref returns a pointer's element type; otherwise it returns typ.
+func deref(typ types.Type) types.Type {
+ if p, ok := typ.Underlying().(*types.Pointer); ok {
+ return p.Elem()
+ }
+ return typ
+}
+
+// isPackageLevel reports whether obj is a package-level object.
+func isPackageLevel(obj types.Object) bool {
+ return obj.Pkg().Scope().Lookup(obj.Name()) == obj
+}
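
The import-spec links in doTypeInfo above deliberately skip the quotes: the anchor starts one byte past the opening quote and spans len(path) bytes. A standalone sketch of that arithmetic on a tiny parsed file (the file contents are invented):

package main

import (
	"fmt"
	"go/parser"
	"go/token"
	"strconv"
)

const src = `package p

import "sync/atomic"
`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, parser.ImportsOnly)
	if err != nil {
		panic(err)
	}
	for _, imp := range f.Imports {
		// Same arithmetic as doTypeInfo: drop the surrounding quotes and
		// link exactly len(path) bytes of the import path.
		L := int(imp.End()-imp.Path.Pos()) - len(`""`)
		path, _ := strconv.Unquote(imp.Path.Value)
		offset := fset.Position(imp.Path.Pos()).Offset
		fmt.Printf("link [%d,%d) -> /pkg/%s\n", offset+1, offset+1+L, path)
	}
}
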
diff --git a/llgo/third_party/go.tools/godoc/cmdline.go b/llgo/third_party/go.tools/godoc/cmdline.go
new file mode 100644
index 0000000000000000000000000000000000000000..ef9773fbc7fcf807c03f477a1dac281da12feef0
--- /dev/null
+++ b/llgo/third_party/go.tools/godoc/cmdline.go
@@ -0,0 +1,206 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package godoc
+
+import (
+ "fmt"
+ "go/ast"
+ "go/build"
+ "io"
+ "log"
+ "os"
+ pathpkg "path"
+ "path/filepath"
+ "regexp"
+ "strings"
+
+ "llvm.org/llgo/third_party/go.tools/godoc/vfs"
+)
+
+const (
+ target = "/target"
+ cmdPrefix = "cmd/"
+ srcPrefix = "src/"
+ toolsPath = "llvm.org/llgo/third_party/go.tools/cmd/"
+)
+
+// CommandLine returns godoc results to w.
+// Note that it may add a /target path to fs.
+func CommandLine(w io.Writer, fs vfs.NameSpace, pres *Presentation, args []string) error {
+ path := args[0]
+ srcMode := pres.SrcMode
+ cmdMode := strings.HasPrefix(path, cmdPrefix)
+ if strings.HasPrefix(path, srcPrefix) {
+ path = strings.TrimPrefix(path, srcPrefix)
+ srcMode = true
+ }
+ var abspath, relpath string
+ if cmdMode {
+ path = strings.TrimPrefix(path, cmdPrefix)
+ } else {
+ abspath, relpath = paths(fs, pres, path)
+ }
+
+ var mode PageInfoMode
+ if relpath == builtinPkgPath {
+ // the fake built-in package contains unexported identifiers
+ mode = NoFiltering | NoTypeAssoc
+ }
+ if srcMode {
+ // only filter exports if we don't have explicit command-line filter arguments
+ if len(args) > 1 {
+ mode |= NoFiltering
+ }
+ mode |= ShowSource
+ }
+
+ // First, try as package unless forced as command.
+ var info *PageInfo
+ if !cmdMode {
+ info = pres.GetPkgPageInfo(abspath, relpath, mode)
+ }
+
+ // Second, try as command (if the path is not absolute).
+ var cinfo *PageInfo
+ if !filepath.IsAbs(path) {
+ // First try go.tools/cmd.
+ abspath = pathpkg.Join(pres.PkgFSRoot(), toolsPath+path)
+ cinfo = pres.GetCmdPageInfo(abspath, relpath, mode)
+ if cinfo.IsEmpty() {
+ // Then try $GOROOT/cmd.
+ abspath = pathpkg.Join(pres.CmdFSRoot(), path)
+ cinfo = pres.GetCmdPageInfo(abspath, relpath, mode)
+ }
+ }
+
+ // determine what to use
+ if info == nil || info.IsEmpty() {
+ if cinfo != nil && !cinfo.IsEmpty() {
+ // only cinfo exists - switch to cinfo
+ info = cinfo
+ }
+ } else if cinfo != nil && !cinfo.IsEmpty() {
+ // both info and cinfo exist - use cinfo if info
+ // contains only subdirectory information
+ if info.PAst == nil && info.PDoc == nil {
+ info = cinfo
+ } else if relpath != target {
+ // The above check handles the case where an operating system path
+ // is provided (see documentation for paths below). In that case,
+ // relpath is set to "/target" (in anticipation of accessing packages there),
+ // and is therefore not expected to match a command.
+ fmt.Fprintf(w, "use 'godoc %s%s' for documentation on the %s command \n\n", cmdPrefix, relpath, relpath)
+ }
+ }
+
+ if info == nil {
+ return fmt.Errorf("%s: no such directory or package", args[0])
+ }
+ if info.Err != nil {
+ return info.Err
+ }
+
+ if info.PDoc != nil && info.PDoc.ImportPath == target {
+ // Replace virtual /target with actual argument from command line.
+ info.PDoc.ImportPath = args[0]
+ }
+
+ // If we have more than one argument, use the remaining arguments for filtering.
+ if len(args) > 1 {
+ info.IsFiltered = true
+ filterInfo(args[1:], info)
+ }
+
+ packageText := pres.PackageText
+ if pres.HTMLMode {
+ packageText = pres.PackageHTML
+ }
+ if err := packageText.Execute(w, info); err != nil {
+ return err
+ }
+ return nil
+}
+
+// paths determines the paths to use.
+//
+// If we are passed an operating system path like . or ./foo or /foo/bar or c:\mysrc,
+// we need to map that path somewhere in the fs name space so that routines
+// like getPageInfo will see it. We use the arbitrarily-chosen virtual path "/target"
+// for this. That is, if we get passed a directory like the above, we map that
+// directory so that getPageInfo sees it as /target.
+// Returns the absolute and relative paths.
+func paths(fs vfs.NameSpace, pres *Presentation, path string) (string, string) {
+ if filepath.IsAbs(path) {
+ fs.Bind(target, vfs.OS(path), "/", vfs.BindReplace)
+ return target, target
+ }
+ if build.IsLocalImport(path) {
+ cwd, _ := os.Getwd() // ignore errors
+ path = filepath.Join(cwd, path)
+ fs.Bind(target, vfs.OS(path), "/", vfs.BindReplace)
+ return target, target
+ }
+ if bp, _ := build.Import(path, "", build.FindOnly); bp.Dir != "" && bp.ImportPath != "" {
+ fs.Bind(target, vfs.OS(bp.Dir), "/", vfs.BindReplace)
+ return target, bp.ImportPath
+ }
+ return pathpkg.Join(pres.PkgFSRoot(), path), path
+}
+
+// filterInfo updates info to include only the nodes that match the given
+// filter args.
+func filterInfo(args []string, info *PageInfo) {
+ rx, err := makeRx(args)
+ if err != nil {
+ log.Fatalf("illegal regular expression from %v: %v", args, err)
+ }
+
+ filter := func(s string) bool { return rx.MatchString(s) }
+ switch {
+ case info.PAst != nil:
+ newPAst := map[string]*ast.File{}
+ for name, a := range info.PAst {
+ cmap := ast.NewCommentMap(info.FSet, a, a.Comments)
+ a.Comments = []*ast.CommentGroup{} // remove all comments.
+ ast.FilterFile(a, filter)
+ if len(a.Decls) > 0 {
+ newPAst[name] = a
+ }
+ for _, d := range a.Decls {
+ // add back the comments associated with d only
+ comments := cmap.Filter(d).Comments()
+ a.Comments = append(a.Comments, comments...)
+ }
+ }
+ info.PAst = newPAst // add only matching files.
+ case info.PDoc != nil:
+ info.PDoc.Filter(filter)
+ }
+}
+
+// Does s look like a regular expression?
+func isRegexp(s string) bool {
+ return strings.IndexAny(s, ".(|)*+?^$[]") >= 0
+}
+
+// Make a regular expression of the form
+// names[0]|names[1]|...names[len(names)-1].
+// Returns an error if the regular expression is illegal.
+func makeRx(names []string) (*regexp.Regexp, error) {
+ if len(names) == 0 {
+ return nil, fmt.Errorf("no expression provided")
+ }
+ s := ""
+ for i, name := range names {
+ if i > 0 {
+ s += "|"
+ }
+ if isRegexp(name) {
+ s += name
+ } else {
+ s += "^" + name + "$" // must match exactly
+ }
+ }
+ return regexp.Compile(s)
+}
diff --git a/llgo/third_party/go.tools/godoc/cmdline_test.go b/llgo/third_party/go.tools/godoc/cmdline_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..7b10be9126d727036c0f90b93a78a43fcfc5a846
--- /dev/null
+++ b/llgo/third_party/go.tools/godoc/cmdline_test.go
@@ -0,0 +1,290 @@
+package godoc
+
+import (
+ "bytes"
+ "go/build"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "reflect"
+ "regexp"
+ "runtime"
+ "testing"
+ "text/template"
+
+ "llvm.org/llgo/third_party/go.tools/godoc/vfs"
+ "llvm.org/llgo/third_party/go.tools/godoc/vfs/mapfs"
+)
+
+// setupGoroot creates a temporary directory to act as GOROOT when running tests
+// that depend upon the build package. It updates build.Default to point to the
+// new GOROOT.
+// It returns a function that can be called to reset build.Default and remove
+// the temporary directory.
+func setupGoroot(t *testing.T) (cleanup func()) {
+ var stdLib = map[string]string{
+ "src/fmt/fmt.go": `// Package fmt implements formatted I/O.
+package fmt
+
+type Stringer interface {
+ String() string
+}
+`,
+ }
+ goroot, err := ioutil.TempDir("", "cmdline_test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ origContext := build.Default
+ build.Default = build.Context{
+ GOROOT: goroot,
+ Compiler: "gc",
+ }
+ for relname, contents := range stdLib {
+ name := filepath.Join(goroot, relname)
+ if err := os.MkdirAll(filepath.Dir(name), 0770); err != nil {
+ t.Fatal(err)
+ }
+ if err := ioutil.WriteFile(name, []byte(contents), 0770); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ return func() {
+ if err := os.RemoveAll(goroot); err != nil {
+ t.Log(err)
+ }
+ build.Default = origContext
+ }
+}
+
+func TestPaths(t *testing.T) {
+ cleanup := setupGoroot(t)
+ defer cleanup()
+
+ pres := &Presentation{
+ pkgHandler: handlerServer{
+ fsRoot: "/fsroot",
+ },
+ }
+ fs := make(vfs.NameSpace)
+
+ absPath := "/foo/fmt"
+ if runtime.GOOS == "windows" {
+ absPath = `c:\foo\fmt`
+ }
+
+ for _, tc := range []struct {
+ desc string
+ path string
+ expAbs string
+ expRel string
+ }{
+ {
+ "Absolute path",
+ absPath,
+ "/target",
+ "/target",
+ },
+ {
+ "Local import",
+ "../foo/fmt",
+ "/target",
+ "/target",
+ },
+ {
+ "Import",
+ "fmt",
+ "/target",
+ "fmt",
+ },
+ {
+ "Default",
+ "unknownpkg",
+ "/fsroot/unknownpkg",
+ "unknownpkg",
+ },
+ } {
+ abs, rel := paths(fs, pres, tc.path)
+ if abs != tc.expAbs || rel != tc.expRel {
+ t.Errorf("%s: paths(%q) = %s,%s; want %s,%s", tc.desc, tc.path, abs, rel, tc.expAbs, tc.expRel)
+ }
+ }
+}
+
+func TestMakeRx(t *testing.T) {
+ for _, tc := range []struct {
+ desc string
+ names []string
+ exp string
+ }{
+ {
+ desc: "empty string",
+ names: []string{""},
+ exp: `^$`,
+ },
+ {
+ desc: "simple text",
+ names: []string{"a"},
+ exp: `^a$`,
+ },
+ {
+ desc: "two words",
+ names: []string{"foo", "bar"},
+ exp: `^foo$|^bar$`,
+ },
+ {
+ desc: "word & non-trivial",
+ names: []string{"foo", `ab?c`},
+ exp: `^foo$|ab?c`,
+ },
+ {
+ desc: "bad regexp",
+ names: []string{`(."`},
+ exp: `(."`,
+ },
+ } {
+ expRE, expErr := regexp.Compile(tc.exp)
+ if re, err := makeRx(tc.names); !reflect.DeepEqual(err, expErr) && !reflect.DeepEqual(re, expRE) {
+ t.Errorf("%s: makeRx(%v) = %q,%q; want %q,%q", tc.desc, tc.names, re, err, expRE, expErr)
+ }
+ }
+}
+
+func TestCommandLine(t *testing.T) {
+ cleanup := setupGoroot(t)
+ defer cleanup()
+ mfs := mapfs.New(map[string]string{
+ "src/bar/bar.go": `// Package bar is an example.
+package bar
+`,
+ "src/foo/foo.go": `// Package foo.
+package foo
+
+// First function is first.
+func First() {
+}
+
+// Second function is second.
+func Second() {
+}
+`,
+ "src/gen/gen.go": `// Package gen
+package gen
+
+//line notgen.go:3
+// F doc //line 1 should appear
+// line 2 should appear
+func F()
+//line foo.go:100`, // no newline on end to check corner cases!
+ "src/vet/vet.go": `// Package vet
+package vet
+`,
+ "src/cmd/go/doc.go": `// The go command
+package main
+`,
+ "src/cmd/gofmt/doc.go": `// The gofmt command
+package main
+`,
+ "src/cmd/vet/vet.go": `// The vet command
+package main
+`,
+ })
+ fs := make(vfs.NameSpace)
+ fs.Bind("/", mfs, "/", vfs.BindReplace)
+ c := NewCorpus(fs)
+ p := &Presentation{Corpus: c}
+ p.cmdHandler = handlerServer{
+ p: p,
+ c: c,
+ pattern: "/cmd/",
+ fsRoot: "/src/cmd",
+ }
+ p.pkgHandler = handlerServer{
+ p: p,
+ c: c,
+ pattern: "/pkg/",
+ fsRoot: "/src",
+ exclude: []string{"/src/cmd"},
+ }
+ p.initFuncMap()
+ p.PackageText = template.Must(template.New("PackageText").Funcs(p.FuncMap()).Parse(`{{$info := .}}{{$filtered := .IsFiltered}}{{if $filtered}}{{range .PAst}}{{range .Decls}}{{node $info .}}{{end}}{{end}}{{else}}{{with .PAst}}{{range $filename, $ast := .}}{{$filename}}:
+{{node $ $ast}}{{end}}{{end}}{{end}}{{with .PDoc}}{{if $.IsMain}}COMMAND {{.Doc}}{{else}}PACKAGE {{.Doc}}{{end}}{{with .Funcs}}
+{{range .}}{{node $ .Decl}}
+{{comment_text .Doc " " "\t"}}{{end}}{{end}}{{end}}`))
+
+ for _, tc := range []struct {
+ desc string
+ args []string
+ exp string
+ err bool
+ }{
+ {
+ desc: "standard package",
+ args: []string{"fmt"},
+ exp: "PACKAGE Package fmt implements formatted I/O.\n",
+ },
+ {
+ desc: "package",
+ args: []string{"bar"},
+ exp: "PACKAGE Package bar is an example.\n",
+ },
+ {
+ desc: "package w. filter",
+ args: []string{"foo", "First"},
+ exp: "PACKAGE \nfunc First()\n First function is first.\n",
+ },
+ {
+ desc: "package w. bad filter",
+ args: []string{"foo", "DNE"},
+ exp: "PACKAGE ",
+ },
+ {
+ desc: "source mode",
+ args: []string{"src/bar"},
+ exp: "bar/bar.go:\n// Package bar is an example.\npackage bar\n",
+ },
+ {
+ desc: "source mode w. filter",
+ args: []string{"src/foo", "Second"},
+ exp: "// Second function is second.\nfunc Second() {\n}",
+ },
+ {
+ desc: "package w. //line comments",
+ args: []string{"gen", "F"},
+ exp: "PACKAGE \nfunc F()\n F doc //line 1 should appear line 2 should appear\n",
+ },
+ {
+ desc: "command",
+ args: []string{"go"},
+ exp: "COMMAND The go command\n",
+ },
+ {
+ desc: "forced command",
+ args: []string{"cmd/gofmt"},
+ exp: "COMMAND The gofmt command\n",
+ },
+ {
+ desc: "bad arg",
+ args: []string{"doesnotexist"},
+ err: true,
+ },
+ {
+ desc: "both command and package",
+ args: []string{"vet"},
+ exp: "use 'godoc cmd/vet' for documentation on the vet command \n\nPACKAGE Package vet\n",
+ },
+ {
+ desc: "root directory",
+ args: []string{"/"},
+ exp: "",
+ },
+ } {
+ w := new(bytes.Buffer)
+ err := CommandLine(w, fs, p, tc.args)
+ if got, want := w.String(), tc.exp; got != want || tc.err == (err == nil) {
+ t.Errorf("%s: CommandLine(%v) = %q (%v); want %q (%v)",
+ tc.desc, tc.args, got, err, want, tc.err)
+ }
+ }
+}
diff --git a/llgo/third_party/go.tools/godoc/corpus.go b/llgo/third_party/go.tools/godoc/corpus.go
new file mode 100644
index 0000000000000000000000000000000000000000..dfaeb51107c9472cd8849c22744ec5c7538918ac
--- /dev/null
+++ b/llgo/third_party/go.tools/godoc/corpus.go
@@ -0,0 +1,157 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package godoc
+
+import (
+ "errors"
+ pathpkg "path"
+ "time"
+
+ "llvm.org/llgo/third_party/go.tools/godoc/analysis"
+ "llvm.org/llgo/third_party/go.tools/godoc/util"
+ "llvm.org/llgo/third_party/go.tools/godoc/vfs"
+)
+
+// A Corpus holds all the state related to serving and indexing a
+// collection of Go code.
+//
+// Construct a new Corpus with NewCorpus, then modify options,
+// then call its Init method.
+type Corpus struct {
+ fs vfs.FileSystem
+
+ // Verbose logging.
+ Verbose bool
+
+ // IndexEnabled controls whether indexing is enabled.
+ IndexEnabled bool
+
+ // IndexFiles specifies a glob pattern specifying index files.
+ // If not empty, the index is read from these files in sorted
+ // order.
+ IndexFiles string
+
+ // IndexThrottle specifies the indexing throttle value
+ // between 0.0 and 1.0. At 0.0, the indexer always sleeps.
+ // At 1.0, the indexer never sleeps. Because 0.0 is useless
+ // and redundant with setting IndexEnabled to false, the
+ // zero value for IndexThrottle means 0.9.
+ IndexThrottle float64
+
+ // IndexInterval specifies the time to sleep between reindexing
+ // all the sources.
+ // If zero, a default is used. If negative, the index is only
+ // built once.
+ IndexInterval time.Duration
+
+ // IndexDocs enables indexing of Go documentation.
+ // This will produce search results for exported types, functions,
+ // methods, variables, and constants, and will link to the godoc
+ // documentation for those identifiers.
+ IndexDocs bool
+
+ // IndexGoCode enables indexing of Go source code.
+ // This will produce search results for internal and external identifiers
+ // and will link to both declarations and uses of those identifiers in
+ // source code.
+ IndexGoCode bool
+
+ // IndexFullText enables full-text indexing.
+ // This will provide search results for any matching text in any file that
+ // is indexed, including non-Go files (see whitelisted in index.go).
+ // Regexp searching is supported via full-text indexing.
+ IndexFullText bool
+
+ // MaxResults optionally specifies the maximum results for indexing.
+ MaxResults int
+
+ // SummarizePackage optionally specifies a function to
+ // summarize a package. It exists as an optimization to
+ // avoid reading files to parse package comments.
+ //
+ // If SummarizePackage returns false for ok, the caller
+ // ignores all return values and parses the files in the package
+ // as if SummarizePackage were nil.
+ //
+ // If showList is false, the package is hidden from the
+ // package listing.
+ SummarizePackage func(pkg string) (summary string, showList, ok bool)
+
+ // IndexDirectory optionally specifies a function to determine
+ // whether the provided directory should be indexed. The dir
+ // will be of the form "/src/cmd/6a", "/doc/play",
+ // "/src/io", etc.
+ // If nil, all directories are indexed if indexing is enabled.
+ IndexDirectory func(dir string) bool
+
+ testDir string // TODO(bradfitz,adg): migrate old godoc flag? looks unused.
+
+ // Send a value on this channel to trigger a metadata refresh.
+ // It is buffered so that a signal is not lost if sent
+ // during a refresh.
+ refreshMetadataSignal chan bool
+
+ // file system information
+ fsTree util.RWValue // *Directory tree of packages, updated with each sync (but sync code is removed now)
+ fsModified util.RWValue // timestamp of last call to invalidateIndex
+ docMetadata util.RWValue // mapping from paths to *Metadata
+
+ // SearchIndex is the search index in use.
+ searchIndex util.RWValue
+
+ // Analysis is the result of type and pointer analysis.
+ Analysis analysis.Result
+}
+
+// NewCorpus returns a new Corpus from a filesystem.
+// The returned corpus has all indexing enabled and MaxResults set to 1000.
+// Change or set any options on Corpus before calling the Corpus.Init method.
+func NewCorpus(fs vfs.FileSystem) *Corpus {
+ c := &Corpus{
+ fs: fs,
+ refreshMetadataSignal: make(chan bool, 1),
+
+ MaxResults: 1000,
+ IndexEnabled: true,
+ IndexDocs: true,
+ IndexGoCode: true,
+ IndexFullText: true,
+ }
+ return c
+}
+
+func (c *Corpus) CurrentIndex() (*Index, time.Time) {
+ v, t := c.searchIndex.Get()
+ idx, _ := v.(*Index)
+ return idx, t
+}
+
+func (c *Corpus) FSModifiedTime() time.Time {
+ _, ts := c.fsModified.Get()
+ return ts
+}
+
+// Init initializes Corpus, once options on Corpus are set.
+// It must be called before any subsequent method calls.
+func (c *Corpus) Init() error {
+ // TODO(bradfitz): do this in a goroutine because newDirectory might block for a long time?
+ // It used to be sometimes done in a goroutine before, at least in HTTP server mode.
+ if err := c.initFSTree(); err != nil {
+ return err
+ }
+ c.updateMetadata()
+ go c.refreshMetadataLoop()
+ return nil
+}
+
+func (c *Corpus) initFSTree() error {
+ dir := c.newDirectory(pathpkg.Join("/", c.testDir), -1)
+ if dir == nil {
+ return errors.New("godoc: corpus fstree is nil")
+ }
+ c.fsTree.Set(dir)
+ c.invalidateIndex()
+ return nil
+}
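
As the Corpus doc comment says: construct with NewCorpus, adjust options, then call Init. A minimal, hypothetical sketch of that sequence, binding an example GOROOT into a vfs.NameSpace the same way the code in this tree does (the path is only illustrative):

package main

import (
	"log"

	"llvm.org/llgo/third_party/go.tools/godoc"
	"llvm.org/llgo/third_party/go.tools/godoc/vfs"
)

func main() {
	fs := make(vfs.NameSpace)
	fs.Bind("/", vfs.OS("/usr/local/go"), "/", vfs.BindReplace) // example GOROOT

	c := godoc.NewCorpus(fs)
	c.Verbose = true
	c.IndexFullText = false // options must be set before Init

	if err := c.Init(); err != nil {
		log.Fatal(err)
	}
	log.Println("corpus initialized; directory tree built")
}
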
diff --git a/llgo/third_party/go.tools/godoc/dirtrees.go b/llgo/third_party/go.tools/godoc/dirtrees.go
new file mode 100644
index 0000000000000000000000000000000000000000..a55b324f73e8a13dde03093e6fd81e760c86136a
--- /dev/null
+++ b/llgo/third_party/go.tools/godoc/dirtrees.go
@@ -0,0 +1,336 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains the code dealing with package directory trees.
+
+package godoc
+
+import (
+ "bytes"
+ "go/doc"
+ "go/parser"
+ "go/token"
+ "log"
+ "os"
+ pathpkg "path"
+ "strings"
+)
+
+// Conventional name for directories containing test data.
+// Excluded from directory trees.
+//
+const testdataDirName = "testdata"
+
+type Directory struct {
+ Depth int
+ Path string // directory path; includes Name
+ Name string // directory name
+ HasPkg bool // true if the directory contains at least one package
+ Synopsis string // package documentation, if any
+ Dirs []*Directory // subdirectories
+}
+
+func isGoFile(fi os.FileInfo) bool {
+ name := fi.Name()
+ return !fi.IsDir() &&
+ len(name) > 0 && name[0] != '.' && // ignore .files
+ pathpkg.Ext(name) == ".go"
+}
+
+func isPkgFile(fi os.FileInfo) bool {
+ return isGoFile(fi) &&
+ !strings.HasSuffix(fi.Name(), "_test.go") // ignore test files
+}
+
+func isPkgDir(fi os.FileInfo) bool {
+ name := fi.Name()
+ return fi.IsDir() && len(name) > 0 &&
+ name[0] != '_' && name[0] != '.' // ignore _files and .files
+}
+
+type treeBuilder struct {
+ c *Corpus
+ maxDepth int
+}
+
+func (b *treeBuilder) newDirTree(fset *token.FileSet, path, name string, depth int) *Directory {
+ if name == testdataDirName {
+ return nil
+ }
+
+ if depth >= b.maxDepth {
+ // return a dummy directory so that the parent directory
+ // doesn't get discarded just because we reached the max
+ // directory depth
+ return &Directory{
+ Depth: depth,
+ Path: path,
+ Name: name,
+ }
+ }
+
+ var synopses [3]string // prioritized package documentation (0 == highest priority)
+
+ show := true // show in package listing
+ hasPkgFiles := false
+ haveSummary := false
+
+ if hook := b.c.SummarizePackage; hook != nil {
+ if summary, show0, ok := hook(strings.TrimPrefix(path, "/src/")); ok {
+ hasPkgFiles = true
+ show = show0
+ synopses[0] = summary
+ haveSummary = true
+ }
+ }
+
+ list, _ := b.c.fs.ReadDir(path)
+
+ // determine number of subdirectories and if there are package files
+ var dirchs []chan *Directory
+
+ for _, d := range list {
+ switch {
+ case isPkgDir(d):
+ ch := make(chan *Directory, 1)
+ dirchs = append(dirchs, ch)
+ go func(d os.FileInfo) {
+ name := d.Name()
+ ch <- b.newDirTree(fset, pathpkg.Join(path, name), name, depth+1)
+ }(d)
+ case !haveSummary && isPkgFile(d):
+ // looks like a package file, but may just be a file ending in ".go";
+ // don't just count it yet (otherwise we may end up with hasPkgFiles even
+ // though the directory doesn't contain any real package files - was a bug)
+ // no "optimal" package synopsis yet; continue to collect synopses
+ file, err := b.c.parseFile(fset, pathpkg.Join(path, d.Name()),
+ parser.ParseComments|parser.PackageClauseOnly)
+ if err == nil {
+ hasPkgFiles = true
+ if file.Doc != nil {
+ // prioritize documentation
+ i := -1
+ switch file.Name.Name {
+ case name:
+ i = 0 // normal case: directory name matches package name
+ case "main":
+ i = 1 // directory contains a main package
+ default:
+ i = 2 // none of the above
+ }
+ if 0 <= i && i < len(synopses) && synopses[i] == "" {
+ synopses[i] = doc.Synopsis(file.Doc.Text())
+ }
+ }
+ haveSummary = synopses[0] != ""
+ }
+ }
+ }
+
+ // create subdirectory tree
+ var dirs []*Directory
+ for _, ch := range dirchs {
+ if d := <-ch; d != nil {
+ dirs = append(dirs, d)
+ }
+ }
+
+ // if there are no package files and no subdirectories
+ // containing package files, ignore the directory
+ if !hasPkgFiles && len(dirs) == 0 {
+ return nil
+ }
+
+ // select the highest-priority synopsis for the directory entry, if any
+ synopsis := ""
+ for _, synopsis = range synopses {
+ if synopsis != "" {
+ break
+ }
+ }
+
+ return &Directory{
+ Depth: depth,
+ Path: path,
+ Name: name,
+ HasPkg: hasPkgFiles && show, // TODO(bradfitz): add proper Hide field?
+ Synopsis: synopsis,
+ Dirs: dirs,
+ }
+}
+
+// newDirectory creates a new package directory tree with at most maxDepth
+// levels, anchored at root. The result tree is pruned such that it only
+// contains directories that contain package files or that contain
+// subdirectories containing package files (transitively). If a non-nil
+// pathFilter is provided, directory paths additionally must be accepted
+// by the filter (i.e., pathFilter(path) must be true). If a value >= 0 is
+// provided for maxDepth, nodes at larger depths are pruned as well; they
+// are assumed to contain package files even if their contents are not known
+// (i.e., in this case the tree may contain directories w/o any package files).
+//
+func (c *Corpus) newDirectory(root string, maxDepth int) *Directory {
+ // The root could be a symbolic link so use Stat not Lstat.
+ d, err := c.fs.Stat(root)
+ // If we fail here, report detailed error messages; otherwise
+ // it is hard to see why a directory tree was not built.
+ switch {
+ case err != nil:
+ log.Printf("newDirectory(%s): %s", root, err)
+ return nil
+ case root != "/" && !isPkgDir(d):
+ log.Printf("newDirectory(%s): not a package directory", root)
+ return nil
+ case root == "/" && !d.IsDir():
+ log.Printf("newDirectory(%s): not a directory", root)
+ return nil
+ }
+ if maxDepth < 0 {
+ maxDepth = 1e6 // "infinity"
+ }
+ b := treeBuilder{c, maxDepth}
+ // the file set provided is only for local parsing, no position
+ // information escapes and thus we don't need to save the set
+ return b.newDirTree(token.NewFileSet(), root, d.Name(), 0)
+}
+
+func (dir *Directory) writeLeafs(buf *bytes.Buffer) {
+ if dir != nil {
+ if len(dir.Dirs) == 0 {
+ buf.WriteString(dir.Path)
+ buf.WriteByte('\n')
+ return
+ }
+
+ for _, d := range dir.Dirs {
+ d.writeLeafs(buf)
+ }
+ }
+}
+
+func (dir *Directory) walk(c chan<- *Directory, skipRoot bool) {
+ if dir != nil {
+ if !skipRoot {
+ c <- dir
+ }
+ for _, d := range dir.Dirs {
+ d.walk(c, false)
+ }
+ }
+}
+
+func (dir *Directory) iter(skipRoot bool) <-chan *Directory {
+ c := make(chan *Directory)
+ go func() {
+ dir.walk(c, skipRoot)
+ close(c)
+ }()
+ return c
+}
+
+func (dir *Directory) lookupLocal(name string) *Directory {
+ for _, d := range dir.Dirs {
+ if d.Name == name {
+ return d
+ }
+ }
+ return nil
+}
+
+func splitPath(p string) []string {
+ p = strings.TrimPrefix(p, "/")
+ if p == "" {
+ return nil
+ }
+ return strings.Split(p, "/")
+}
+
+// lookup looks for the *Directory for a given path, relative to dir.
+func (dir *Directory) lookup(path string) *Directory {
+ d := splitPath(dir.Path)
+ p := splitPath(path)
+ i := 0
+ for i < len(d) {
+ if i >= len(p) || d[i] != p[i] {
+ return nil
+ }
+ i++
+ }
+ for dir != nil && i < len(p) {
+ dir = dir.lookupLocal(p[i])
+ i++
+ }
+ return dir
+}
+
+// DirEntry describes a directory entry. The Depth and Height values
+// are useful for presenting an entry in an indented fashion.
+//
+type DirEntry struct {
+ Depth int // >= 0
+ Height int // = DirList.MaxHeight - Depth, > 0
+ Path string // directory path; includes Name, relative to DirList root
+ Name string // directory name
+ HasPkg bool // true if the directory contains at least one package
+ Synopsis string // package documentation, if any
+}
+
+type DirList struct {
+ MaxHeight int // directory tree height, > 0
+ List []DirEntry
+}
+
+// listing creates a (linear) directory listing from a directory tree.
+// If skipRoot is set, the root directory itself is excluded from the list.
+// If filter is set, only the directory entries whose paths match the filter
+// are included.
+//
+func (root *Directory) listing(skipRoot bool, filter func(string) bool) *DirList {
+ if root == nil {
+ return nil
+ }
+
+ // determine number of entries n and maximum height
+ n := 0
+ minDepth := 1 << 30 // infinity
+ maxDepth := 0
+ for d := range root.iter(skipRoot) {
+ n++
+ if minDepth > d.Depth {
+ minDepth = d.Depth
+ }
+ if maxDepth < d.Depth {
+ maxDepth = d.Depth
+ }
+ }
+ maxHeight := maxDepth - minDepth + 1
+
+ if n == 0 {
+ return nil
+ }
+
+ // create list
+ list := make([]DirEntry, 0, n)
+ for d := range root.iter(skipRoot) {
+ if filter != nil && !filter(d.Path) {
+ continue
+ }
+ var p DirEntry
+ p.Depth = d.Depth - minDepth
+ p.Height = maxHeight - p.Depth
+ // the path is relative to root.Path - remove the root.Path
+ // prefix (the prefix should always be present but avoid
+ // crashes and check)
+ path := strings.TrimPrefix(d.Path, root.Path)
+ // remove leading separator if any - path must be relative
+ path = strings.TrimPrefix(path, "/")
+ p.Path = path
+ p.Name = d.Name
+ p.HasPkg = d.HasPkg
+ p.Synopsis = d.Synopsis
+ list = append(list, p)
+ }
+
+ return &DirList{maxHeight, list}
+}
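
Directory.listing flattens the tree into a DirList: Depth is re-based on the shallowest listed entry, Height counts down from MaxHeight, and paths become relative to the root. A hypothetical same-package sketch with a hand-built tree (paths and synopsis invented):

package godoc

// exampleDirList flattens a tiny hand-built tree, purely to illustrate listing.
func exampleDirList() *DirList {
	root := &Directory{
		Depth: 0,
		Path:  "/src",
		Name:  "src",
		Dirs: []*Directory{
			{Depth: 1, Path: "/src/fmt", Name: "fmt", HasPkg: true, Synopsis: "Package fmt implements formatted I/O."},
			{Depth: 1, Path: "/src/io", Name: "io", HasPkg: true},
		},
	}
	// skipRoot=true omits "/src" itself; both entries end up with Depth 0,
	// Height 1, and relative Paths "fmt" and "io".
	return root.listing(true, nil)
}
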
diff --git a/llgo/third_party/go.tools/godoc/format.go b/llgo/third_party/go.tools/godoc/format.go
new file mode 100644
index 0000000000000000000000000000000000000000..6013238febf9253054863d2719466fa2fc959bff
--- /dev/null
+++ b/llgo/third_party/go.tools/godoc/format.go
@@ -0,0 +1,371 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements FormatSelections and FormatText.
+// FormatText is used to HTML-format Go and non-Go source
+// text with line numbers and highlighted sections. It is
+// built on top of FormatSelections, a generic formatter
+// for "selected" text.
+
+package godoc
+
+import (
+ "fmt"
+ "go/scanner"
+ "go/token"
+ "io"
+ "regexp"
+ "strconv"
+ "text/template"
+)
+
+// ----------------------------------------------------------------------------
+// Implementation of FormatSelections
+
+// A Segment describes a text segment [start, end).
+// The zero value of a Segment is a ready-to-use empty segment.
+//
+type Segment struct {
+ start, end int
+}
+
+func (seg *Segment) isEmpty() bool { return seg.start >= seg.end }
+
+// A Selection is an "iterator" function returning a text segment.
+// Repeated calls to a selection return consecutive, non-overlapping,
+// non-empty segments, followed by an infinite sequence of empty
+// segments. The first empty segment marks the end of the selection.
+//
+type Selection func() Segment
+
+// A LinkWriter writes some start or end "tag" to w for the text offset offs.
+// It is called by FormatSelections at the start or end of each link segment.
+//
+type LinkWriter func(w io.Writer, offs int, start bool)
+
+// A SegmentWriter formats a text according to selections and writes it to w.
+// The selections parameter is a bit set indicating which selections provided
+// to FormatSelections overlap with the text segment: If the n'th bit is set
+// in selections, the n'th selection provided to FormatSelections is overlapping
+// with the text.
+//
+type SegmentWriter func(w io.Writer, text []byte, selections int)
+
+// FormatSelections takes a text and writes it to w using link and segment
+// writers lw and sw as follows: lw is invoked for consecutive segment starts
+// and ends as specified through the links selection, and sw is invoked for
+// consecutive segments of text overlapped by the same selections as specified
+// by selections. The link writer lw may be nil, in which case the links
+// Selection is ignored.
+//
+func FormatSelections(w io.Writer, text []byte, lw LinkWriter, links Selection, sw SegmentWriter, selections ...Selection) {
+ // If we have a link writer, make the links
+ // selection the last entry in selections
+ if lw != nil {
+ selections = append(selections, links)
+ }
+
+ // compute the sequence of consecutive segment changes
+ changes := newMerger(selections)
+
+ // The i'th bit in bitset indicates that the text
+ // at the current offset is covered by selections[i].
+ bitset := 0
+ lastOffs := 0
+
+ // Text segments are written in a delayed fashion
+ // such that consecutive segments belonging to the
+ // same selection can be combined (peephole optimization).
+ // last describes the last segment which has not yet been written.
+ var last struct {
+ begin, end int // valid if begin < end
+ bitset int
+ }
+
+ // flush writes the last delayed text segment
+ flush := func() {
+ if last.begin < last.end {
+ sw(w, text[last.begin:last.end], last.bitset)
+ }
+ last.begin = last.end // invalidate last
+ }
+
+ // segment runs the segment [lastOffs, end) with the selection
+ // indicated by bitset through the segment peephole optimizer.
+ segment := func(end int) {
+ if lastOffs < end { // ignore empty segments
+ if last.end != lastOffs || last.bitset != bitset {
+ // the last segment is not adjacent to or
+ // differs from the new one
+ flush()
+ // start a new segment
+ last.begin = lastOffs
+ }
+ last.end = end
+ last.bitset = bitset
+ }
+ }
+
+ for {
+ // get the next segment change
+ index, offs, start := changes.next()
+ if index < 0 || offs > len(text) {
+ // no more segment changes or the next change
+ // is past the end of the text - we're done
+ break
+ }
+ // determine the kind of segment change
+ if lw != nil && index == len(selections)-1 {
+ // we have a link segment change (see start of this function):
+ // format the previous selection segment, write the
+ // link tag and start a new selection segment
+ segment(offs)
+ flush()
+ lastOffs = offs
+ lw(w, offs, start)
+ } else {
+ // we have a selection change:
+ // format the previous selection segment, determine
+ // the new selection bitset and start a new segment
+ segment(offs)
+ lastOffs = offs
+ mask := 1 << uint(index)
+ if start {
+ bitset |= mask
+ } else {
+ bitset &^= mask
+ }
+ }
+ }
+ segment(len(text))
+ flush()
+}
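+
+// formatCommentsExample is a hypothetical sketch of the calling convention
+// only: it runs FormatSelections with no link writer, a single selection
+// covering all comment tokens in src, and a SegmentWriter that brackets the
+// selected text (bit 0 of the bitset corresponds to that first selection).
+func formatCommentsExample(w io.Writer, src []byte) {
+	sw := func(out io.Writer, text []byte, selections int) {
+		if selections&1 != 0 {
+			fmt.Fprintf(out, "[%s]", text)
+			return
+		}
+		out.Write(text)
+	}
+	FormatSelections(w, src, nil, nil, sw, tokenSelection(src, token.COMMENT))
+}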
+
+// A merger merges a slice of Selections and produces a sequence of
+// consecutive segment change events through repeated next() calls.
+//
+type merger struct {
+ selections []Selection
+ segments []Segment // segments[i] is the next segment of selections[i]
+}
+
+const infinity int = 2e9
+
+func newMerger(selections []Selection) *merger {
+ segments := make([]Segment, len(selections))
+ for i, sel := range selections {
+ segments[i] = Segment{infinity, infinity}
+ if sel != nil {
+ if seg := sel(); !seg.isEmpty() {
+ segments[i] = seg
+ }
+ }
+ }
+ return &merger{selections, segments}
+}
+
+// next returns the next segment change: index specifies the Selection
+// to which the segment belongs, offs is the segment start or end offset
+// as determined by the start value. If there are no more segment changes,
+// next returns an index value < 0.
+//
+func (m *merger) next() (index, offs int, start bool) {
+ // find the next smallest offset where a segment starts or ends
+ offs = infinity
+ index = -1
+ for i, seg := range m.segments {
+ switch {
+ case seg.start < offs:
+ offs = seg.start
+ index = i
+ start = true
+ case seg.end < offs:
+ offs = seg.end
+ index = i
+ start = false
+ }
+ }
+ if index < 0 {
+ // no offset found => all selections merged
+ return
+ }
+ // offset found - it's either the start or end offset but
+ // either way it is ok to consume the start offset: set it
+ // to infinity so it won't be considered in the following
+ // next call
+ m.segments[index].start = infinity
+ if start {
+ return
+ }
+ // end offset found - consume it
+ m.segments[index].end = infinity
+ // advance to the next segment for that selection
+ seg := m.selections[index]()
+ if !seg.isEmpty() {
+ m.segments[index] = seg
+ }
+ return
+}
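+
+// dumpChanges is a hypothetical debugging sketch: it drains a merger built
+// from the given selections and prints each segment-change event in the
+// order FormatSelections would see them.
+func dumpChanges(selections ...Selection) {
+	m := newMerger(selections)
+	for {
+		index, offs, start := m.next()
+		if index < 0 {
+			break // all selections merged
+		}
+		fmt.Printf("selection %d: offset %d, start=%v\n", index, offs, start)
+	}
+}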
+
+// ----------------------------------------------------------------------------
+// Implementation of FormatText
+
+// lineSelection returns the line segments for text as a Selection.
+func lineSelection(text []byte) Selection {
+ i, j := 0, 0
+ return func() (seg Segment) {
+ // find next newline, if any
+ for j < len(text) {
+ j++
+ if text[j-1] == '\n' {
+ break
+ }
+ }
+ if i < j {
+ // text[i:j] constitutes a line
+ seg = Segment{i, j}
+ i = j
+ }
+ return
+ }
+}
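+
+// printLineRanges is a hypothetical sketch showing how a Selection is drained
+// by hand: it reports the byte range of every line returned by lineSelection,
+// stopping at the first empty segment.
+func printLineRanges(text []byte) {
+	lines := lineSelection(text)
+	for seg := lines(); !seg.isEmpty(); seg = lines() {
+		fmt.Printf("line at [%d, %d)\n", seg.start, seg.end)
+	}
+}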
+
+// tokenSelection returns, as a selection, the sequence of
+// consecutive occurrences of token sel in the Go src text.
+//
+func tokenSelection(src []byte, sel token.Token) Selection {
+ var s scanner.Scanner
+ fset := token.NewFileSet()
+ file := fset.AddFile("", fset.Base(), len(src))
+ s.Init(file, src, nil, scanner.ScanComments)
+ return func() (seg Segment) {
+ for {
+ pos, tok, lit := s.Scan()
+ if tok == token.EOF {
+ break
+ }
+ offs := file.Offset(pos)
+ if tok == sel {
+ seg = Segment{offs, offs + len(lit)}
+ break
+ }
+ }
+ return
+ }
+}
+
+// makeSelection is a helper function to make a Selection from a slice of pairs.
+// Pairs describing empty segments are ignored.
+//
+func makeSelection(matches [][]int) Selection {
+ i := 0
+ return func() Segment {
+ for i < len(matches) {
+ m := matches[i]
+ i++
+ if m[0] < m[1] {
+ // non-empty segment
+ return Segment{m[0], m[1]}
+ }
+ }
+ return Segment{}
+ }
+}
+
+// regexpSelection computes the Selection for the regular expression expr in text.
+func regexpSelection(text []byte, expr string) Selection {
+ var matches [][]int
+ if rx, err := regexp.Compile(expr); err == nil {
+ matches = rx.FindAllIndex(text, -1)
+ }
+ return makeSelection(matches)
+}
+
+var selRx = regexp.MustCompile(`^([0-9]+):([0-9]+)`)
+
+// RangeSelection computes the Selection for a text range described
+// by the argument str; the range description must match the selRx
+// regular expression.
+func RangeSelection(str string) Selection {
+ m := selRx.FindStringSubmatch(str)
+ if len(m) >= 2 {
+ from, _ := strconv.Atoi(m[1])
+ to, _ := strconv.Atoi(m[2])
+ if from < to {
+ return makeSelection([][]int{{from, to}})
+ }
+ }
+ return nil
+}
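+
+// rangeSelectionExample is a hypothetical sketch: RangeSelection parses a
+// "from:to" byte-offset pair, so "120:130" yields a Selection whose single
+// segment is [120, 130), while malformed input such as "130:120" or "abc"
+// yields nil (which FormatSelections tolerates as an empty selection).
+func rangeSelectionExample() {
+	if sel := RangeSelection("120:130"); sel != nil {
+		seg := sel()
+		fmt.Printf("selected [%d, %d)\n", seg.start, seg.end) // [120, 130)
+	}
+}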
+
+// Span tags for all the possible selection combinations that may
+// be generated by FormatText. Selections are indicated by a bitset,
+// and the value of the bitset specifies the tag to be used.
+//
+// bit 0: comments
+// bit 1: highlights
+// bit 2: selections
+//
+var startTags = [][]byte{
+ /* 000 */ []byte(``),
+ /* 001 */ []byte(`