diff --git a/src/libcore/str.rs b/src/libcore/str.rs index 84ffb7fb20e74..fb65976d17d89 100644 --- a/src/libcore/str.rs +++ b/src/libcore/str.rs @@ -1732,7 +1732,9 @@ impl<'a> StrSlice<'a> for &'a str { #[inline] fn slice(&self, begin: uint, end: uint) -> &'a str { - assert!(self.is_char_boundary(begin) && self.is_char_boundary(end)); + assert!(self.is_char_boundary(begin) && self.is_char_boundary(end), + "index {} or {} in `{}` does not lie on a character boundary", begin, + end, *self); unsafe { raw::slice_bytes(*self, begin, end) } } @@ -1743,7 +1745,8 @@ impl<'a> StrSlice<'a> for &'a str { #[inline] fn slice_to(&self, end: uint) -> &'a str { - assert!(self.is_char_boundary(end)); + assert!(self.is_char_boundary(end), "index {} in `{}` does not lie on \ + a character boundary", end, *self); unsafe { raw::slice_bytes(*self, 0, end) } } diff --git a/src/libsyntax/abi.rs b/src/libsyntax/abi.rs index 3d6266fd4c09d..d607810af84a8 100644 --- a/src/libsyntax/abi.rs +++ b/src/libsyntax/abi.rs @@ -60,9 +60,12 @@ pub struct AbiData { } pub enum AbiArchitecture { - RustArch, // Not a real ABI (e.g., intrinsic) - AllArch, // An ABI that specifies cross-platform defaults (e.g., "C") - Archs(u32) // Multiple architectures (bitset) + /// Not a real ABI (e.g., intrinsic) + RustArch, + /// An ABI that specifies cross-platform defaults (e.g., "C") + AllArch, + /// Multiple architectures (bitset) + Archs(u32) } static AbiDatas: &'static [AbiData] = &[ @@ -84,21 +87,13 @@ static AbiDatas: &'static [AbiData] = &[ AbiData {abi: RustIntrinsic, name: "rust-intrinsic", abi_arch: RustArch}, ]; +/// Iterates through each of the defined ABIs. fn each_abi(op: |abi: Abi| -> bool) -> bool { - /*! - * - * Iterates through each of the defined ABIs. - */ - AbiDatas.iter().advance(|abi_data| op(abi_data.abi)) } +/// Returns the ABI with the given name (if any). pub fn lookup(name: &str) -> Option<Abi> { - /*! - * - * Returns the ABI with the given name (if any). - */ - let mut res = None; each_abi(|abi| { diff --git a/src/libsyntax/ast.rs b/src/libsyntax/ast.rs index aeafc0e306c21..d266e5e42a024 100644 --- a/src/libsyntax/ast.rs +++ b/src/libsyntax/ast.rs @@ -24,7 +24,8 @@ use std::rc::Rc; use std::gc::{Gc, GC}; use serialize::{Encodable, Decodable, Encoder, Decoder}; -/// A pointer abstraction. FIXME(eddyb) #10676 use Rc in the future. +/// A pointer abstraction. +// FIXME(eddyb) #10676 use Rc in the future. pub type P<T> = Gc<T>; #[allow(non_snake_case_functions)] @@ -36,11 +37,11 @@ pub fn P<T: 'static>(value: T) -> P<T> { // FIXME #6993: in librustc, uses of "ident" should be replaced // by just "Name".
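The richer assert! messages added above turn an opaque slicing failure into an actionable one. Here is a minimal sketch of the boundary rule they enforce, written in present-day Rust (the `uint`-era code in this diff no longer compiles); the example string is made up for illustration:

```rust
fn main() {
    let s = "héllo"; // 'é' occupies bytes 1..3 in UTF-8
    assert!(s.is_char_boundary(1));  // byte 1 starts 'é': a valid boundary
    assert!(!s.is_char_boundary(2)); // byte 2 is inside 'é': slicing here panics
    // Before this patch, slice() failed with a bare assertion; with it, the
    // panic message names the offending index and echoes the sliced string.
    println!("{}", &s[0..1]); // "h": both endpoints are boundaries, so this is fine
}
```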
-// an identifier contains a Name (index into the interner -// table) and a SyntaxContext to track renaming and -// macro expansion per Flatt et al., "Macros -// That Work Together" -#[deriving(Clone, Hash, PartialOrd, Eq, Ord, Show)] +/// An identifier contains a Name (index into the interner +/// table) and a SyntaxContext to track renaming and +/// macro expansion per Flatt et al., "Macros +/// That Work Together" +#[deriving(Clone, Hash, PartialOrd, Eq, Ord)] pub struct Ident { pub name: Name, pub ctxt: SyntaxContext @@ -51,6 +52,12 @@ impl Ident { pub fn new(name: Name) -> Ident { Ident {name: name, ctxt: EMPTY_CTXT}} } +impl Show for Ident { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + format_args!(|a| f.write_fmt(a), "\"{}\"", token::get_ident(*self).get()) + } +} + impl PartialEq for Ident { fn eq(&self, other: &Ident) -> bool { if self.ctxt == other.ctxt { @@ -122,10 +129,9 @@ pub struct Lifetime { pub name: Name } -// a "Path" is essentially Rust's notion of a name; -// for instance: std::cmp::PartialEq . It's represented -// as a sequence of identifiers, along with a bunch -// of supporting information. +/// A "Path" is essentially Rust's notion of a name; for instance: +/// std::cmp::PartialEq . It's represented as a sequence of identifiers, +/// along with a bunch of supporting information. #[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)] pub struct Path { pub span: Span, @@ -163,15 +169,15 @@ pub struct DefId { pub static LOCAL_CRATE: CrateNum = 0; pub static CRATE_NODE_ID: NodeId = 0; -// When parsing and doing expansions, we initially give all AST nodes this AST -// node value. Then later, in the renumber pass, we renumber them to have -// small, positive ids. +/// When parsing and doing expansions, we initially give all AST nodes this AST +/// node value. Then later, in the renumber pass, we renumber them to have +/// small, positive ids. pub static DUMMY_NODE_ID: NodeId = -1; -// The AST represents all type param bounds as types. -// typeck::collect::compute_bounds matches these against -// the "special" built-in traits (see middle::lang_items) and -// detects Copy, Send and Share. +/// The AST represents all type param bounds as types. +/// typeck::collect::compute_bounds matches these against +/// the "special" built-in traits (see middle::lang_items) and +/// detects Copy, Send and Share. #[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)] pub enum TyParamBound { TraitTyParamBound(TraitRef), @@ -208,9 +214,9 @@ impl Generics { } } -// The set of MetaItems that define the compilation environment of the crate, -// used to drive conditional compilation -pub type CrateConfig = Vec>; +/// The set of MetaItems that define the compilation environment of the crate, +/// used to drive conditional compilation +pub type CrateConfig = Vec> ; #[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)] pub struct Crate { @@ -287,13 +293,13 @@ pub enum BindingMode { pub enum Pat_ { PatWild, PatWildMulti, - // A PatIdent may either be a new bound variable, - // or a nullary enum (in which case the second field - // is None). - // In the nullary enum case, the parser can't determine - // which it is. The resolver determines this, and - // records this pattern's NodeId in an auxiliary - // set (of "pat_idents that refer to nullary enums") + /// A PatIdent may either be a new bound variable, + /// or a nullary enum (in which case the second field + /// is None). + /// In the nullary enum case, the parser can't determine + /// which it is. 
The resolver determines this, and + /// records this pattern's NodeId in an auxiliary + /// set (of "pat_idents that refer to nullary enums") PatIdent(BindingMode, Path, Option>), PatEnum(Path, Option>>), /* "none" means a * pattern where we don't bind the fields to names */ @@ -303,8 +309,8 @@ pub enum Pat_ { PatRegion(Gc), // reference pattern PatLit(Gc), PatRange(Gc, Gc), - // [a, b, ..i, y, z] is represented as - // PatVec(~[a, b], Some(i), ~[y, z]) + /// [a, b, ..i, y, z] is represented as: + /// PatVec(~[a, b], Some(i), ~[y, z]) PatVec(Vec>, Option>, Vec>), PatMac(Mac), } @@ -317,9 +323,12 @@ pub enum Mutability { #[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)] pub enum ExprVstore { - ExprVstoreUniq, // ~[1,2,3,4] - ExprVstoreSlice, // &[1,2,3,4] - ExprVstoreMutSlice, // &mut [1,2,3,4] + /// ~[1, 2, 3, 4] + ExprVstoreUniq, + /// &[1, 2, 3, 4] + ExprVstoreSlice, + /// &mut [1, 2, 3, 4] + ExprVstoreMutSlice, } #[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)] @@ -357,16 +366,16 @@ pub type Stmt = Spanned; #[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)] pub enum Stmt_ { - // could be an item or a local (let) binding: + /// Could be an item or a local (let) binding: StmtDecl(Gc, NodeId), - // expr without trailing semi-colon (must have unit type): + /// Expr without trailing semi-colon (must have unit type): StmtExpr(Gc, NodeId), - // expr with trailing semi-colon (may have any type): + /// Expr with trailing semi-colon (may have any type): StmtSemi(Gc, NodeId), - // bool: is there a trailing sem-colon? + /// bool: is there a trailing semi-colon? StmtMac(Mac, bool), } @@ -395,9 +404,9 @@ pub type Decl = Spanned; #[deriving(PartialEq, Eq, Encodable, Decodable, Hash)] pub enum Decl_ { - // a local (let) binding: + /// A local (let) binding: DeclLocal(Gc), - // an item binding: + /// An item binding: DeclItem(Gc), } @@ -440,7 +449,7 @@ pub struct Expr { #[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)] pub enum Expr_ { ExprVstore(Gc, ExprVstore), - // First expr is the place; second expr is the value. + /// First expr is the place; second expr is the value. ExprBox(Gc, Gc), ExprVec(Vec>), ExprCall(Gc, Vec>), @@ -481,124 +490,121 @@ pub enum Expr_ { ExprMac(Mac), - // A struct literal expression. + /// A struct literal expression. ExprStruct(Path, Vec , Option> /* base */), - // A vector literal constructed from one repeated element. + /// A vector literal constructed from one repeated element. ExprRepeat(Gc /* element */, Gc /* count */), - // No-op: used solely so we can pretty-print faithfully + /// No-op: used solely so we can pretty-print faithfully ExprParen(Gc) } -// When the main rust parser encounters a syntax-extension invocation, it -// parses the arguments to the invocation as a token-tree. This is a very -// loose structure, such that all sorts of different AST-fragments can -// be passed to syntax extensions using a uniform type. -// -// If the syntax extension is an MBE macro, it will attempt to match its -// LHS "matchers" against the provided token tree, and if it finds a -// match, will transcribe the RHS token tree, splicing in any captured -// macro_parser::matched_nonterminals into the TTNonterminals it finds. -// -// The RHS of an MBE macro is the only place a TTNonterminal or TTSeq -// makes any real sense. You could write them elsewhere but nothing -// else knows what to do with them, so you'll probably get a syntax -// error.
-// +/// When the main rust parser encounters a syntax-extension invocation, it +/// parses the arguments to the invocation as a token-tree. This is a very +/// loose structure, such that all sorts of different AST-fragments can +/// be passed to syntax extensions using a uniform type. +/// +/// If the syntax extension is an MBE macro, it will attempt to match its +/// LHS "matchers" against the provided token tree, and if it finds a +/// match, will transcribe the RHS token tree, splicing in any captured +/// macro_parser::matched_nonterminals into the TTNonterminals it finds. +/// +/// The RHS of an MBE macro is the only place a TTNonterminal or TTSeq +/// makes any real sense. You could write them elsewhere but nothing +/// else knows what to do with them, so you'll probably get a syntax +/// error. #[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)] #[doc="For macro invocations; parsing is delegated to the macro"] pub enum TokenTree { - // a single token + /// A single token TTTok(Span, ::parse::token::Token), - // a delimited sequence (the delimiters appear as the first - // and last elements of the vector) + /// A delimited sequence (the delimiters appear as the first + /// and last elements of the vector) // FIXME(eddyb) #6308 Use Rc<[TokenTree]> after DST. TTDelim(Rc>), // These only make sense for right-hand-sides of MBE macros: - // a kleene-style repetition sequence with a span, a TTForest, - // an optional separator, and a boolean where true indicates - // zero or more (..), and false indicates one or more (+). + /// A kleene-style repetition sequence with a span, a TTForest, + /// an optional separator, and a boolean where true indicates + /// zero or more (..), and false indicates one or more (+). // FIXME(eddyb) #6308 Use Rc<[TokenTree]> after DST. TTSeq(Span, Rc>, Option<::parse::token::Token>, bool), - // a syntactic variable that will be filled in by macro expansion. + /// A syntactic variable that will be filled in by macro expansion. TTNonterminal(Span, Ident) } -// -// Matchers are nodes defined-by and recognized-by the main rust parser and -// language, but they're only ever found inside syntax-extension invocations; -// indeed, the only thing that ever _activates_ the rules in the rust parser -// for parsing a matcher is a matcher looking for the 'matchers' nonterminal -// itself. Matchers represent a small sub-language for pattern-matching -// token-trees, and are thus primarily used by the macro-defining extension -// itself. -// -// MatchTok -// -------- -// -// A matcher that matches a single token, denoted by the token itself. So -// long as there's no $ involved. -// -// -// MatchSeq -// -------- -// -// A matcher that matches a sequence of sub-matchers, denoted various -// possible ways: -// -// $(M)* zero or more Ms -// $(M)+ one or more Ms -// $(M),+ one or more comma-separated Ms -// $(A B C);* zero or more semi-separated 'A B C' seqs -// -// -// MatchNonterminal -// ----------------- -// -// A matcher that matches one of a few interesting named rust -// nonterminals, such as types, expressions, items, or raw token-trees. A -// black-box matcher on expr, for example, binds an expr to a given ident, -// and that ident can re-occur as an interpolation in the RHS of a -// macro-by-example rule. For example: -// -// $foo:expr => 1 + $foo // interpolate an expr -// $foo:tt => $foo // interpolate a token-tree -// $foo:tt => bar! 
$foo // only other valid interpolation -// // is in arg position for another -// // macro -// -// As a final, horrifying aside, note that macro-by-example's input is -// also matched by one of these matchers. Holy self-referential! It is matched -// by a MatchSeq, specifically this one: -// -// $( $lhs:matchers => $rhs:tt );+ -// -// If you understand that, you have closed to loop and understand the whole -// macro system. Congratulations. -// +/// Matchers are nodes defined-by and recognized-by the main rust parser and +/// language, but they're only ever found inside syntax-extension invocations; +/// indeed, the only thing that ever _activates_ the rules in the rust parser +/// for parsing a matcher is a matcher looking for the 'matchers' nonterminal +/// itself. Matchers represent a small sub-language for pattern-matching +/// token-trees, and are thus primarily used by the macro-defining extension +/// itself. +/// +/// MatchTok +/// -------- +/// +/// A matcher that matches a single token, denoted by the token itself. So +/// long as there's no $ involved. +/// +/// +/// MatchSeq +/// -------- +/// +/// A matcher that matches a sequence of sub-matchers, denoted various +/// possible ways: +/// +/// $(M)* zero or more Ms +/// $(M)+ one or more Ms +/// $(M),+ one or more comma-separated Ms +/// $(A B C);* zero or more semi-separated 'A B C' seqs +/// +/// +/// MatchNonterminal +/// ----------------- +/// +/// A matcher that matches one of a few interesting named rust +/// nonterminals, such as types, expressions, items, or raw token-trees. A +/// black-box matcher on expr, for example, binds an expr to a given ident, +/// and that ident can re-occur as an interpolation in the RHS of a +/// macro-by-example rule. For example: +/// +/// $foo:expr => 1 + $foo // interpolate an expr +/// $foo:tt => $foo // interpolate a token-tree +/// $foo:tt => bar! $foo // only other valid interpolation +/// // is in arg position for another +/// // macro +/// +/// As a final, horrifying aside, note that macro-by-example's input is +/// also matched by one of these matchers. Holy self-referential! It is matched +/// by a MatchSeq, specifically this one: +/// +/// $( $lhs:matchers => $rhs:tt );+ +/// +/// If you understand that, you have closed the loop and understand the whole +/// macro system. Congratulations. pub type Matcher = Spanned; #[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)] pub enum Matcher_ { - // match one token + /// Match one token MatchTok(::parse::token::Token), - // match repetitions of a sequence: body, separator, zero ok?, - // lo, hi position-in-match-array used: + /// Match repetitions of a sequence: body, separator, zero ok?, + /// lo, hi position-in-match-array used: MatchSeq(Vec , Option<::parse::token::Token>, bool, uint, uint), - // parse a Rust NT: name to bind, name of NT, position in match array: + /// Parse a Rust NT: name to bind, name of NT, position in match array: MatchNonterminal(Ident, Ident, uint) } pub type Mac = Spanned; -// represents a macro invocation. The Path indicates which macro -// is being invoked, and the vector of token-trees contains the source -// of the macro invocation. -// There's only one flavor, now, so this could presumably be simplified. +/// Represents a macro invocation. The Path indicates which macro +/// is being invoked, and the vector of token-trees contains the source +/// of the macro invocation. +/// There's only one flavor, now, so this could presumably be simplified. 
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)] pub enum Mac_ { MacInvocTT(Path, Vec , SyntaxContext), // new macro-invocation @@ -655,9 +661,9 @@ pub struct TypeMethod { pub vis: Visibility, } -// A trait method is either required (meaning it doesn't have an -// implementation, just a signature) or provided (meaning it has a default -// implementation). +/// A trait method is either required (meaning it doesn't have an +/// implementation, just a signature) or provided (meaning it has a default +/// implementation). #[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)] pub enum TraitMethod { Required(TypeMethod), @@ -717,7 +723,7 @@ pub struct Ty { pub span: Span, } -// Not represented directly in the AST, referred to by name through a ty_path. +/// Not represented directly in the AST, referred to by name through a ty_path. #[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)] pub enum PrimTy { TyInt(IntTy), @@ -749,10 +755,10 @@ pub struct ClosureTy { pub fn_style: FnStyle, pub onceness: Onceness, pub decl: P, - // Optional optvec distinguishes between "fn()" and "fn:()" so we can - // implement issue #7264. None means "fn()", which means infer a default - // bound based on pointer sigil during typeck. Some(Empty) means "fn:()", - // which means use no bounds (e.g., not even Owned on a ~fn()). + /// Optional optvec distinguishes between "fn()" and "fn:()" so we can + /// implement issue #7264. None means "fn()", which means infer a default + /// bound based on pointer sigil during typeck. Some(Empty) means "fn:()", + /// which means use no bounds (e.g., not even Owned on a ~fn()). pub bounds: Option>, } @@ -785,11 +791,11 @@ pub enum Ty_ { TyUnboxedFn(Gc), TyTup(Vec> ), TyPath(Path, Option>, NodeId), // for #7264; see above - // No-op; kept solely so that we can pretty-print faithfully + /// No-op; kept solely so that we can pretty-print faithfully TyParen(P), TyTypeof(Gc), - // TyInfer means the type should be inferred instead of it having been - // specified. This can appear anywhere in a type. + /// TyInfer means the type should be inferred instead of it having been + /// specified. This can appear anywhere in a type. TyInfer, } @@ -848,8 +854,10 @@ pub struct FnDecl { #[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)] pub enum FnStyle { - UnsafeFn, // declared with "unsafe fn" - NormalFn, // declared with "fn" + /// Declared with "unsafe fn" + UnsafeFn, + /// Declared with "fn" + NormalFn, } impl fmt::Show for FnStyle { @@ -863,17 +871,23 @@ impl fmt::Show for FnStyle { #[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)] pub enum RetStyle { - NoReturn, // functions with return type _|_ that always - // raise an error or exit (i.e. never return to the caller) - Return, // everything else + /// Functions with return type ! that always + /// raise an error or exit (i.e. 
never return to the caller) + NoReturn, + /// Everything else + Return, } #[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)] pub enum ExplicitSelf_ { - SelfStatic, // no self - SelfValue, // `self` - SelfRegion(Option, Mutability), // `&'lt self`, `&'lt mut self` - SelfUniq // `~self` + /// No self + SelfStatic, + /// `self` + SelfValue, + /// `&'lt self`, `&'lt mut self` + SelfRegion(Option, Mutability), + /// `~self` + SelfUniq } pub type ExplicitSelf = Spanned; @@ -951,17 +965,17 @@ pub type ViewPath = Spanned; #[deriving(PartialEq, Eq, Encodable, Decodable, Hash)] pub enum ViewPath_ { - // quux = foo::bar::baz - // - // or just - // - // foo::bar::baz (with 'baz =' implicitly on the left) + /// `quux = foo::bar::baz` + /// + /// or just + /// + /// `foo::bar::baz` (with 'baz =' implicitly on the left) ViewPathSimple(Ident, Path, NodeId), - // foo::bar::* + /// `foo::bar::*` ViewPathGlob(Path, NodeId), - // foo::bar::{a,b,c} + /// `foo::bar::{a,b,c}` ViewPathList(Path, Vec , NodeId) } @@ -975,20 +989,20 @@ pub struct ViewItem { #[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)] pub enum ViewItem_ { - // ident: name used to refer to this crate in the code - // optional (InternedString,StrStyle): if present, this is a location - // (containing arbitrary characters) from which to fetch the crate sources - // For example, extern crate whatever = "github.com/rust-lang/rust" + /// Ident: name used to refer to this crate in the code + /// optional (InternedString,StrStyle): if present, this is a location + /// (containing arbitrary characters) from which to fetch the crate sources + /// For example, extern crate whatever = "github.com/rust-lang/rust" ViewItemExternCrate(Ident, Option<(InternedString,StrStyle)>, NodeId), ViewItemUse(Gc), } -// Meta-data associated with an item +/// Meta-data associated with an item pub type Attribute = Spanned; -// Distinguishes between Attributes that decorate items and Attributes that -// are contained as statements within items. These two cases need to be -// distinguished for pretty-printing. +/// Distinguishes between Attributes that decorate items and Attributes that +/// are contained as statements within items. These two cases need to be +/// distinguished for pretty-printing. #[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)] pub enum AttrStyle { AttrOuter, @@ -998,7 +1012,7 @@ pub enum AttrStyle { #[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)] pub struct AttrId(pub uint); -// doc-comments are promoted to attributes that have is_sugared_doc = true +/// Doc-comments are promoted to attributes that have is_sugared_doc = true #[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)] pub struct Attribute_ { pub id: AttrId, @@ -1007,13 +1021,12 @@ pub struct Attribute_ { pub is_sugared_doc: bool, } -/* - TraitRef's appear in impls. - resolve maps each TraitRef's ref_id to its defining trait; that's all - that the ref_id is for. The impl_id maps to the "self type" of this impl. - If this impl is an ItemImpl, the impl_id is redundant (it could be the - same as the impl's node id). - */ + +/// TraitRef's appear in impls. +/// resolve maps each TraitRef's ref_id to its defining trait; that's all +/// that the ref_id is for. The impl_id maps to the "self type" of this impl. +/// If this impl is an ItemImpl, the impl_id is redundant (it could be the +/// same as the impl's node id).
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)] pub struct TraitRef { pub path: Path, @@ -1063,7 +1076,8 @@ pub type StructField = Spanned; #[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)] pub enum StructFieldKind { NamedField(Ident, Visibility), - UnnamedField(Visibility), // element of a tuple-like struct + /// Element of a tuple-like struct + UnnamedField(Visibility), } impl StructFieldKind { @@ -1077,12 +1091,15 @@ impl StructFieldKind { #[deriving(PartialEq, Eq, Encodable, Decodable, Hash)] pub struct StructDef { - pub fields: Vec, /* fields, not including ctor */ - /* ID of the constructor. This is only used for tuple- or enum-like - * structs. */ + /// Fields, not including ctor + pub fields: Vec, + /// ID of the constructor. This is only used for tuple- or enum-like + /// structs. pub ctor_id: Option, - pub super_struct: Option>, // Super struct, if specified. - pub is_virtual: bool, // True iff the struct may be inherited from. + /// Super struct, if specified. + pub super_struct: Option>, + /// True iff the struct may be inherited from. + pub is_virtual: bool, } /* @@ -1113,7 +1130,7 @@ pub enum Item_ { Option, // (optional) trait this impl implements P, // self Vec>), - // a macro invocation (which includes macro definition) + /// A macro invocation (which includes macro definition) ItemMac(Mac), } @@ -1133,9 +1150,9 @@ pub enum ForeignItem_ { ForeignItemStatic(P, /* is_mutbl */ bool), } -// The data we save and restore about an inlined item or method. This is not -// part of the AST that we parse from a file, but it becomes part of the tree -// that we trans. +/// The data we save and restore about an inlined item or method. This is not +/// part of the AST that we parse from a file, but it becomes part of the tree +/// that we trans. #[deriving(PartialEq, Eq, Encodable, Decodable, Hash)] pub enum InlinedItem { IIItem(Gc), diff --git a/src/libsyntax/ast_map.rs b/src/libsyntax/ast_map.rs index 828e9ab12c251..358cd5ffd6814 100644 --- a/src/libsyntax/ast_map.rs +++ b/src/libsyntax/ast_map.rs @@ -112,13 +112,13 @@ pub enum Node { NodeLifetime(Gc), } -// The odd layout is to bring down the total size. +/// The odd layout is to bring down the total size. #[deriving(Clone)] enum MapEntry { - // Placeholder for holes in the map. + /// Placeholder for holes in the map. NotPresent, - // All the node types, with a parent ID. + /// All the node types, with a parent ID. EntryItem(NodeId, Gc), EntryForeignItem(NodeId, Gc), EntryTraitMethod(NodeId, Gc), @@ -133,14 +133,14 @@ enum MapEntry { EntryStructCtor(NodeId, Gc), EntryLifetime(NodeId, Gc), - // Roots for node trees. + /// Roots for node trees. RootCrate, RootInlinedParent(P) } struct InlinedParent { path: Vec , - // Required by NodeTraitMethod and NodeMethod. + /// Required by NodeTraitMethod and NodeMethod. def_id: DefId } @@ -243,7 +243,7 @@ impl Map { ItemForeignMod(ref nm) => Some(nm.abi), _ => None }, - // Wrong but OK, because the only inlined foreign items are intrinsics. + /// Wrong but OK, because the only inlined foreign items are intrinsics. Some(RootInlinedParent(_)) => Some(abi::RustIntrinsic), _ => None }; @@ -432,8 +432,8 @@ pub trait FoldOps { pub struct Ctx<'a, F> { map: &'a Map, - // The node in which we are currently mapping (an item or a method). - // When equal to DUMMY_NODE_ID, the next mapped node becomes the parent. + /// The node in which we are currently mapping (an item or a method). + /// When equal to DUMMY_NODE_ID, the next mapped node becomes the parent. 
parent: NodeId, fold_ops: F } @@ -618,9 +618,9 @@ pub fn map_crate(krate: Crate, fold_ops: F) -> (Crate, Map) { (krate, map) } -// Used for items loaded from external crate that are being inlined into this -// crate. The `path` should be the path to the item but should not include -// the item itself. +/// Used for items loaded from external crate that are being inlined into this +/// crate. The `path` should be the path to the item but should not include +/// the item itself. pub fn map_decoded_item(map: &Map, path: Vec , fold_ops: F, diff --git a/src/libsyntax/ast_util.rs b/src/libsyntax/ast_util.rs index d28553da69173..a41e5b173a638 100644 --- a/src/libsyntax/ast_util.rs +++ b/src/libsyntax/ast_util.rs @@ -33,8 +33,8 @@ pub fn path_name_i(idents: &[Ident]) -> String { }).collect::>().connect("::").to_string() } -// totally scary function: ignores all but the last element, should have -// a different name +/// Totally scary function: ignores all but the last element, should have +/// a different name pub fn path_to_ident(path: &Path) -> Ident { path.segments.last().unwrap().identifier } @@ -112,8 +112,8 @@ pub enum SuffixMode { AutoSuffix, } -// Get a string representation of a signed int type, with its value. -// We want to avoid "45int" and "-3int" in favor of "45" and "-3" +/// Get a string representation of a signed int type, with its value. +/// We want to avoid "45int" and "-3int" in favor of "45" and "-3" pub fn int_ty_to_str(t: IntTy, val: Option, mode: SuffixMode) -> String { let s = match t { TyI if val.is_some() => match mode { @@ -145,8 +145,8 @@ pub fn int_ty_max(t: IntTy) -> u64 { } } -// Get a string representation of an unsigned int type, with its value. -// We want to avoid "42uint" in favor of "42u" +/// Get a string representation of an unsigned int type, with its value. +/// We want to avoid "42uint" in favor of "42u" pub fn uint_ty_to_str(t: UintTy, val: Option, mode: SuffixMode) -> String { let s = match t { TyU if val.is_some() => match mode { @@ -265,8 +265,8 @@ pub fn public_methods(ms: Vec> ) -> Vec> { }).collect() } -// extract a TypeMethod from a TraitMethod. if the TraitMethod is -// a default, pull out the useful fields to make a TypeMethod +/// Extract a TypeMethod from a TraitMethod. If the TraitMethod is +/// a default, pull out the useful fields to make a TypeMethod pub fn trait_method_to_ty_method(method: &TraitMethod) -> TypeMethod { match *method { Required(ref m) => (*m).clone(), @@ -721,7 +721,7 @@ pub fn segments_name_eq(a : &[ast::PathSegment], b : &[ast::PathSegment]) -> boo } } -// Returns true if this literal is a string and false otherwise. +/// Returns true if this literal is a string and false otherwise. pub fn lit_is_str(lit: Gc) -> bool { match lit.node { LitStr(..) => true, diff --git a/src/libsyntax/attr.rs b/src/libsyntax/attr.rs index a037c0ac07e0e..0528ea7510fda 100644 --- a/src/libsyntax/attr.rs +++ b/src/libsyntax/attr.rs @@ -47,10 +47,8 @@ pub trait AttrMetaMethods { /// #[foo="bar"] and #[foo(bar)] fn name(&self) -> InternedString; - /** - * Gets the string value if self is a MetaNameValue variant - * containing a string, otherwise None. - */ + /// Gets the string value if self is a MetaNameValue variant + /// containing a string, otherwise None. fn value_str(&self) -> Option; /// Gets a list of inner meta items from a list MetaItem type.
fn meta_item_list<'a>(&'a self) -> Option<&'a [Gc]>; @@ -424,18 +422,16 @@ pub fn require_unique_names(diagnostic: &SpanHandler, metas: &[Gc]) { } -/** - * Fold this over attributes to parse #[repr(...)] forms. - * - * Valid repr contents: any of the primitive integral type names (see - * `int_type_of_word`, below) to specify the discriminant type; and `C`, to use - * the same discriminant size that the corresponding C enum would. These are - * not allowed on univariant or zero-variant enums, which have no discriminant. - * - * If a discriminant type is so specified, then the discriminant will be - * present (before fields, if any) with that type; reprensentation - * optimizations which would remove it will not be done. - */ +/// Fold this over attributes to parse #[repr(...)] forms. +/// +/// Valid repr contents: any of the primitive integral type names (see +/// `int_type_of_word`, below) to specify the discriminant type; and `C`, to use +/// the same discriminant size that the corresponding C enum would. These are +/// not allowed on univariant or zero-variant enums, which have no discriminant. +/// +/// If a discriminant type is so specified, then the discriminant will be +/// present (before fields, if any) with that type; representation +/// optimizations which would remove it will not be done. pub fn find_repr_attr(diagnostic: &SpanHandler, attr: &Attribute, acc: ReprAttr) -> ReprAttr { let mut acc = acc; diff --git a/src/libsyntax/codemap.rs b/src/libsyntax/codemap.rs index c917198e7d471..60094975e7eb5 100644 --- a/src/libsyntax/codemap.rs +++ b/src/libsyntax/codemap.rs @@ -252,15 +252,15 @@ pub struct FileMap { } impl FileMap { - // EFFECT: register a start-of-line offset in the - // table of line-beginnings. - // UNCHECKED INVARIANT: these offsets must be added in the right - // order and must be in the right places; there is shared knowledge - // about what ends a line between this file and parse.rs - // WARNING: pos param here is the offset relative to start of CodeMap, - // and CodeMap will append a newline when adding a filemap without a newline at the end, - // so the safe way to call this is with value calculated as - // filemap.start_pos + newline_offset_relative_to_the_start_of_filemap. + /// EFFECT: register a start-of-line offset in the + /// table of line-beginnings. + /// UNCHECKED INVARIANT: these offsets must be added in the right + /// order and must be in the right places; there is shared knowledge + /// about what ends a line between this file and parse.rs + /// WARNING: pos param here is the offset relative to start of CodeMap, + /// and CodeMap will append a newline when adding a filemap without a newline at the end, + /// so the safe way to call this is with value calculated as + /// filemap.start_pos + newline_offset_relative_to_the_start_of_filemap. pub fn next_line(&self, pos: BytePos) { // the new charpos must be > the last one (or it's the first one). let mut lines = self.lines.borrow_mut();; @@ -269,7 +269,7 @@ impl FileMap { lines.push(pos); } - // get a line from the list of pre-computed line-beginnings + /// Get a line from the list of pre-computed line-beginnings pub fn get_line(&self, line: int) -> String { let mut lines = self.lines.borrow_mut(); let begin: BytePos = *lines.get(line as uint) - self.start_pos; @@ -428,9 +428,8 @@ impl CodeMap { FileMapAndBytePos {fm: fm, pos: offset} } - // Converts an absolute BytePos to a CharPos relative to the filemap and above. + /// Converts an absolute BytePos to a CharPos relative to the filemap and above.
pub fn bytepos_to_file_charpos(&self, bpos: BytePos) -> CharPos { - debug!("codemap: converting {:?} to char pos", bpos); let idx = self.lookup_filemap_idx(bpos); let files = self.files.borrow(); let map = files.get(idx); @@ -439,7 +438,7 @@ impl CodeMap { let mut total_extra_bytes = 0; for mbc in map.multibyte_chars.borrow().iter() { - debug!("codemap: {:?}-byte char at {:?}", mbc.bytes, mbc.pos); + debug!("{}-byte char at {}", mbc.bytes, mbc.pos); if mbc.pos < bpos { // every character is at least one byte, so we only // count the actual extra bytes. @@ -514,11 +513,11 @@ impl CodeMap { let chpos = self.bytepos_to_file_charpos(pos); let linebpos = *f.lines.borrow().get(a); let linechpos = self.bytepos_to_file_charpos(linebpos); - debug!("codemap: byte pos {:?} is on the line at byte pos {:?}", + debug!("byte pos {} is on the line at byte pos {}", pos, linebpos); - debug!("codemap: char pos {:?} is on the line at char pos {:?}", + debug!("char pos {} is on the line at char pos {}", chpos, linechpos); - debug!("codemap: byte is on line: {:?}", line); + debug!("byte is on line: {}", line); assert!(chpos >= linechpos); Loc { file: f, diff --git a/src/libsyntax/diagnostic.rs b/src/libsyntax/diagnostic.rs index 16c463f0c96ff..4b95df17d9a44 100644 --- a/src/libsyntax/diagnostic.rs +++ b/src/libsyntax/diagnostic.rs @@ -20,7 +20,7 @@ use std::iter::range; use std::string::String; use term; -// maximum number of lines we will print for each error; arbitrary. +/// Maximum number of lines we will print for each error; arbitrary. static MAX_LINES: uint = 6u; #[deriving(Clone)] @@ -72,9 +72,9 @@ pub struct FatalError; /// or `.span_bug` rather than a failed assertion, etc. pub struct ExplicitBug; -// a span-handler is like a handler but also -// accepts span information for source-location -// reporting. +/// A span-handler is like a handler but also +/// accepts span information for source-location +/// reporting. pub struct SpanHandler { pub handler: Handler, pub cm: codemap::CodeMap, @@ -113,9 +113,9 @@ impl SpanHandler { } } -// a handler deals with errors; certain errors -// (fatal, bug, unimpl) may cause immediate exit, -// others log errors for later reporting. +/// A handler deals with errors; certain errors +/// (fatal, bug, unimpl) may cause immediate exit, +/// others log errors for later reporting. pub struct Handler { err_count: Cell, emit: RefCell>, @@ -441,12 +441,12 @@ fn highlight_lines(err: &mut EmitterWriter, Ok(()) } -// Here are the differences between this and the normal `highlight_lines`: -// `custom_highlight_lines` will always put arrow on the last byte of the -// span (instead of the first byte). Also, when the span is too long (more -// than 6 lines), `custom_highlight_lines` will print the first line, then -// dot dot dot, then last line, whereas `highlight_lines` prints the first -// six lines. +/// Here are the differences between this and the normal `highlight_lines`: +/// `custom_highlight_lines` will always put the arrow on the last byte of the +/// span (instead of the first byte). Also, when the span is too long (more +/// than 6 lines), `custom_highlight_lines` will print the first line, then +/// dot dot dot, then last line, whereas `highlight_lines` prints the first +/// six lines.
fn custom_highlight_lines(w: &mut EmitterWriter, cm: &codemap::CodeMap, sp: Span, diff --git a/src/libsyntax/ext/base.rs b/src/libsyntax/ext/base.rs index 0d8373eac3c7f..2c3a91c5f1b97 100644 --- a/src/libsyntax/ext/base.rs +++ b/src/libsyntax/ext/base.rs @@ -269,9 +269,9 @@ pub enum SyntaxExtension { pub type NamedSyntaxExtension = (Name, SyntaxExtension); pub struct BlockInfo { - // should macros escape from this scope? + /// Should macros escape from this scope? pub macros_escape: bool, - // what are the pending renames? + /// What are the pending renames? pub pending_renames: RenameList, } @@ -284,11 +284,11 @@ impl BlockInfo { } } -// a list of ident->name renamings +/// A list of ident->name renamings pub type RenameList = Vec<(ast::Ident, Name)>; -// The base map of methods for expanding syntax extension -// AST nodes into full ASTs +/// The base map of methods for expanding syntax extension +/// AST nodes into full ASTs pub fn syntax_expander_table() -> SyntaxEnv { // utility function to simplify creating NormalTT syntax extensions fn builtin_normal_expander(f: MacroExpanderFn) -> SyntaxExtension { @@ -392,9 +392,9 @@ pub fn syntax_expander_table() -> SyntaxEnv { syntax_expanders } -// One of these is made during expansion and incrementally updated as we go; -// when a macro expansion occurs, the resulting nodes have the backtrace() -// -> expn_info of their expansion context stored into their span. +/// One of these is made during expansion and incrementally updated as we go; +/// when a macro expansion occurs, the resulting nodes have the backtrace() +/// -> expn_info of their expansion context stored into their span. pub struct ExtCtxt<'a> { pub parse_sess: &'a parse::ParseSess, pub cfg: ast::CrateConfig, @@ -605,11 +605,11 @@ pub fn get_exprs_from_tts(cx: &mut ExtCtxt, Some(es) } -// in order to have some notion of scoping for macros, -// we want to implement the notion of a transformation -// environment. +/// In order to have some notion of scoping for macros, +/// we want to implement the notion of a transformation +/// environment. -// This environment maps Names to SyntaxExtensions. +/// This environment maps Names to SyntaxExtensions. //impl question: how to implement it? Initially, the // env will contain only macros, so it might be painful @@ -626,7 +626,6 @@ struct MapChainFrame { map: HashMap, } -// Only generic to make it easy to test pub struct SyntaxEnv { chain: Vec , } diff --git a/src/libsyntax/ext/deriving/encodable.rs b/src/libsyntax/ext/deriving/encodable.rs index f57670af1999b..bec715b3af84e 100644 --- a/src/libsyntax/ext/deriving/encodable.rs +++ b/src/libsyntax/ext/deriving/encodable.rs @@ -8,79 +8,76 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! - -The compiler code necessary to implement the `#[deriving(Encodable)]` -(and `Decodable`, in decodable.rs) extension. The idea here is that -type-defining items may be tagged with `#[deriving(Encodable, Decodable)]`. 
- -For example, a type like: - -```ignore -#[deriving(Encodable, Decodable)] -struct Node { id: uint } -``` - -would generate two implementations like: - -```ignore -impl Encodable for Node { - fn encode(&self, s: &S) { - s.emit_struct("Node", 1, || { - s.emit_field("id", 0, || s.emit_uint(self.id)) - }) - } -} - -impl Decodable for node_id { - fn decode(d: &D) -> Node { - d.read_struct("Node", 1, || { - Node { - id: d.read_field("x".to_string(), 0, || decode(d)) - } - }) - } -} -``` - -Other interesting scenarios are whe the item has type parameters or -references other non-built-in types. A type definition like: - -```ignore -#[deriving(Encodable, Decodable)] -struct spanned { node: T, span: Span } -``` - -would yield functions like: - -```ignore - impl< - S: Encoder, - T: Encodable - > spanned: Encodable { - fn encode(s: &S) { - s.emit_rec(|| { - s.emit_field("node", 0, || self.node.encode(s)); - s.emit_field("span", 1, || self.span.encode(s)); - }) - } - } - - impl< - D: Decoder, - T: Decodable - > spanned: Decodable { - fn decode(d: &D) -> spanned { - d.read_rec(|| { - { - node: d.read_field("node".to_string(), 0, || decode(d)), - span: d.read_field("span".to_string(), 1, || decode(d)), - } - }) - } - } -``` -*/ +/// The compiler code necessary to implement the `#[deriving(Encodable)]` +/// (and `Decodable`, in decodable.rs) extension. The idea here is that +/// type-defining items may be tagged with `#[deriving(Encodable, Decodable)]`. +/// +/// For example, a type like: +/// +/// ```ignore +/// #[deriving(Encodable, Decodable)] +/// struct Node { id: uint } +/// ``` +/// +/// would generate two implementations like: +/// +/// ```ignore +/// impl Encodable for Node { +/// fn encode(&self, s: &S) { +/// s.emit_struct("Node", 1, || { +/// s.emit_field("id", 0, || s.emit_uint(self.id)) +/// }) +/// } +/// } +/// +/// impl Decodable for node_id { +/// fn decode(d: &D) -> Node { +/// d.read_struct("Node", 1, || { +/// Node { +/// id: d.read_field("x".to_string(), 0, || decode(d)) +/// } +/// }) +/// } +/// } +/// ``` +/// +/// Other interesting scenarios are when the item has type parameters or +/// references other non-built-in types. A type definition like: +/// +/// ```ignore +/// #[deriving(Encodable, Decodable)] +/// struct spanned { node: T, span: Span } +/// ``` +/// +/// would yield functions like: +/// +/// ```ignore +/// impl< +/// S: Encoder, +/// T: Encodable +/// > spanned: Encodable { +/// fn encode(s: &S) { +/// s.emit_rec(|| { +/// s.emit_field("node", 0, || self.node.encode(s)); +/// s.emit_field("span", 1, || self.span.encode(s)); +/// }) +/// } +/// } +/// +/// impl< +/// D: Decoder, +/// T: Decodable +/// > spanned: Decodable { +/// fn decode(d: &D) -> spanned { +/// d.read_rec(|| { +/// { +/// node: d.read_field("node".to_string(), 0, || decode(d)), +/// span: d.read_field("span".to_string(), 1, || decode(d)), +/// } +/// }) +/// } +/// } +/// ``` use ast; use ast::{MetaItem, Item, Expr, ExprRet, MutMutable, LitNil}; diff --git a/src/libsyntax/ext/deriving/generic/mod.rs b/src/libsyntax/ext/deriving/generic/mod.rs index 05269dbb44dda..cee46c0bcc778 100644 --- a/src/libsyntax/ext/deriving/generic/mod.rs +++ b/src/libsyntax/ext/deriving/generic/mod.rs @@ -8,174 +8,170 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! - -Some code that abstracts away much of the boilerplate of writing -`deriving` instances for traits.
Among other things it manages getting -access to the fields of the 4 different sorts of structs and enum -variants, as well as creating the method and impl ast instances. - -Supported features (fairly exhaustive): - -- Methods taking any number of parameters of any type, and returning - any type, other than vectors, bottom and closures. -- Generating `impl`s for types with type parameters and lifetimes - (e.g. `Option`), the parameters are automatically given the - current trait as a bound. (This includes separate type parameters - and lifetimes for methods.) -- Additional bounds on the type parameters, e.g. the `Ord` instance - requires an explicit `PartialEq` bound at the - moment. (`TraitDef.additional_bounds`) - -Unsupported: FIXME #6257: calling methods on reference fields, -e.g. deriving Eq/Ord/Clone don't work on `struct A(&int)`, -because of how the auto-dereferencing happens. - -The most important thing for implementers is the `Substructure` and -`SubstructureFields` objects. The latter groups 5 possibilities of the -arguments: - -- `Struct`, when `Self` is a struct (including tuple structs, e.g - `struct T(int, char)`). -- `EnumMatching`, when `Self` is an enum and all the arguments are the - same variant of the enum (e.g. `Some(1)`, `Some(3)` and `Some(4)`) -- `EnumNonMatching` when `Self` is an enum and the arguments are not - the same variant (e.g. `None`, `Some(1)` and `None`). If - `const_nonmatching` is true, this will contain an empty list. -- `StaticEnum` and `StaticStruct` for static methods, where the type - being derived upon is either an enum or struct respectively. (Any - argument with type Self is just grouped among the non-self - arguments.) - -In the first two cases, the values from the corresponding fields in -all the arguments are grouped together. In the `EnumNonMatching` case -this isn't possible (different variants have different fields), so the -fields are grouped by which argument they come from. There are no -fields with values in the static cases, so these are treated entirely -differently. - -The non-static cases have `Option` in several places associated -with field `expr`s. This represents the name of the field it is -associated with. It is only not `None` when the associated field has -an identifier in the source code. For example, the `x`s in the -following snippet - -```rust -struct A { x : int } - -struct B(int); - -enum C { - C0(int), - C1 { x: int } -} -``` - -The `int`s in `B` and `C0` don't have an identifier, so the -`Option`s would be `None` for them. - -In the static cases, the structure is summarised, either into the just -spans of the fields or a list of spans and the field idents (for tuple -structs and record structs, respectively), or a list of these, for -enums (one for each variant). For empty struct and empty enum -variants, it is represented as a count of 0. - -# Examples - -The following simplified `PartialEq` is used for in-code examples: - -```rust -trait PartialEq { - fn eq(&self, other: &Self); -} -impl PartialEq for int { - fn eq(&self, other: &int) -> bool { - *self == *other - } -} -``` - -Some examples of the values of `SubstructureFields` follow, using the -above `PartialEq`, `A`, `B` and `C`. 
- -## Structs - -When generating the `expr` for the `A` impl, the `SubstructureFields` is - -~~~text -Struct(~[FieldInfo { - span: - name: Some(), - self_: , - other: ~[, - name: None, - - ~[] - }]) -~~~ - -## Enums - -When generating the `expr` for a call with `self == C0(a)` and `other -== C0(b)`, the SubstructureFields is - -~~~text -EnumMatching(0, , - ~[FieldInfo { - span: - name: None, - self_: , - other: ~[] - }]) -~~~ - -For `C1 {x}` and `C1 {x}`, - -~~~text -EnumMatching(1, , - ~[FieldInfo { - span: - name: Some(), - self_: , - other: ~[] - }]) -~~~ - -For `C0(a)` and `C1 {x}` , - -~~~text -EnumNonMatching(~[(0, , - ~[(, None, )]), - (1, , - ~[(, Some(), - )])]) -~~~ - -(and vice versa, but with the order of the outermost list flipped.) - -## Static - -A static method on the above would result in, - -~~~text -StaticStruct(, Named(~[(, )])) - -StaticStruct(, Unnamed(~[])) - -StaticEnum(, ~[(, , Unnamed(~[])), - (, , - Named(~[(, )]))]) -~~~ - -*/ +/// Some code that abstracts away much of the boilerplate of writing +/// `deriving` instances for traits. Among other things it manages getting +/// access to the fields of the 4 different sorts of structs and enum +/// variants, as well as creating the method and impl ast instances. +/// +/// Supported features (fairly exhaustive): +/// +/// - Methods taking any number of parameters of any type, and returning +/// any type, other than vectors, bottom and closures. +/// - Generating `impl`s for types with type parameters and lifetimes +/// (e.g. `Option`), the parameters are automatically given the +/// current trait as a bound. (This includes separate type parameters +/// and lifetimes for methods.) +/// - Additional bounds on the type parameters, e.g. the `Ord` instance +/// requires an explicit `PartialEq` bound at the +/// moment. (`TraitDef.additional_bounds`) +/// +/// Unsupported: FIXME #6257: calling methods on reference fields, +/// e.g. deriving Eq/Ord/Clone don't work on `struct A(&int)`, +/// because of how the auto-dereferencing happens. +/// +/// The most important thing for implementers is the `Substructure` and +/// `SubstructureFields` objects. The latter groups 5 possibilities of the +/// arguments: +/// +/// - `Struct`, when `Self` is a struct (including tuple structs, e.g +/// `struct T(int, char)`). +/// - `EnumMatching`, when `Self` is an enum and all the arguments are the +/// same variant of the enum (e.g. `Some(1)`, `Some(3)` and `Some(4)`) +/// - `EnumNonMatching` when `Self` is an enum and the arguments are not +/// the same variant (e.g. `None`, `Some(1)` and `None`). If +/// `const_nonmatching` is true, this will contain an empty list. +/// - `StaticEnum` and `StaticStruct` for static methods, where the type +/// being derived upon is either an enum or struct respectively. (Any +/// argument with type Self is just grouped among the non-self +/// arguments.) +/// +/// In the first two cases, the values from the corresponding fields in +/// all the arguments are grouped together. In the `EnumNonMatching` case +/// this isn't possible (different variants have different fields), so the +/// fields are grouped by which argument they come from. There are no +/// fields with values in the static cases, so these are treated entirely +/// differently. +/// +/// The non-static cases have `Option` in several places associated +/// with field `expr`s. This represents the name of the field it is +/// associated with. It is only not `None` when the associated field has +/// an identifier in the source code. 
For example, the `x`s in the +/// following snippet +/// +/// ```rust +/// struct A { x : int } +/// +/// struct B(int); +/// +/// enum C { +/// C0(int), +/// C1 { x: int } +/// } +/// ``` +/// +/// The `int`s in `B` and `C0` don't have an identifier, so the +/// `Option`s would be `None` for them. +/// +/// In the static cases, the structure is summarised, either into the just +/// spans of the fields or a list of spans and the field idents (for tuple +/// structs and record structs, respectively), or a list of these, for +/// enums (one for each variant). For empty struct and empty enum +/// variants, it is represented as a count of 0. +/// +/// # Examples +/// +/// The following simplified `PartialEq` is used for in-code examples: +/// +/// ```rust +/// trait PartialEq { +/// fn eq(&self, other: &Self); +/// } +/// impl PartialEq for int { +/// fn eq(&self, other: &int) -> bool { +/// *self == *other +/// } +/// } +/// ``` +/// +/// Some examples of the values of `SubstructureFields` follow, using the +/// above `PartialEq`, `A`, `B` and `C`. +/// +/// ## Structs +/// +/// When generating the `expr` for the `A` impl, the `SubstructureFields` is +/// +/// ~~~text +/// Struct(~[FieldInfo { +/// span: <span of x>, +/// name: Some(<ident of x>), +/// self_: <expr for &self.x>, +/// other: ~[<expr for &other.x>] +/// }]) +/// ~~~ +/// +/// For the `B` impl, called with `B(a)` and `B(b)`, +/// +/// ~~~text +/// Struct(~[FieldInfo { +/// span: <span of int>, +/// name: None, +/// self_: <expr for &a>, +/// other: ~[<expr for &b>] +/// }]) +/// ~~~ +/// +/// ## Enums +/// +/// When generating the `expr` for a call with `self == C0(a)` and `other +/// == C0(b)`, the SubstructureFields is +/// +/// ~~~text +/// EnumMatching(0, <ast::Variant for C0>, +/// ~[FieldInfo { +/// span: <span of int>, +/// name: None, +/// self_: <expr for a>, +/// other: ~[<expr for b>] +/// }]) +/// ~~~ +/// +/// For `C1 {x}` and `C1 {x}`, +/// +/// ~~~text +/// EnumMatching(1, <ast::Variant for C1>, +/// ~[FieldInfo { +/// span: <span of x>, +/// name: Some(<ident of x>), +/// self_: <expr for self.x>, +/// other: ~[<expr for other.x>] +/// }]) +/// ~~~ +/// +/// For `C0(a)` and `C1 {x}`, +/// +/// ~~~text +/// EnumNonMatching(~[(0, <ast::Variant for C0>, +/// ~[(<span of int>, None, <expr for a>)]), +/// (1, <ast::Variant for C1>, +/// ~[(<span of x>, Some(<ident of x>), +/// <expr for x>)])]) +/// ~~~ +/// +/// (and vice versa, but with the order of the outermost list flipped.) +/// +/// ## Static +/// +/// A static method on the above would result in, +/// +/// ~~~text +/// StaticStruct(<ast::StructDef of A>, Named(~[(<ident of x>, <span of x>)])) +/// +/// StaticStruct(<ast::StructDef of B>, Unnamed(~[<span of int>])) +/// +/// StaticEnum(<ast::EnumDef of C>, ~[(<ident of C0>, <span of C0>, Unnamed(~[<span of int>])), +/// (<ident of C1>, <span of C1>, +/// Named(~[(<ident of x>, <span of x>)]))]) +/// ~~~ use std::cell::RefCell; use std::gc::{Gc, GC}; diff --git a/src/libsyntax/ext/deriving/generic/ty.rs b/src/libsyntax/ext/deriving/generic/ty.rs index 7501b950770c2..b29572e269b35 100644 --- a/src/libsyntax/ext/deriving/generic/ty.rs +++ b/src/libsyntax/ext/deriving/generic/ty.rs @@ -24,8 +24,10 @@ use std::gc::Gc; /// The types of pointers pub enum PtrTy<'a> { - Send, // ~ - Borrowed(Option<&'a str>, ast::Mutability), // &['lifetime] [mut] + /// ~ + Send, + /// &'lifetime mut + Borrowed(Option<&'a str>, ast::Mutability), } /// A path, e.g. `::std::option::Option::<int>` (global). Has support @@ -82,12 +84,12 @@ impl<'a> Path<'a> { /// A type.
Supports pointers (except for *), Self, and literals pub enum Ty<'a> { Self, - // &/Box/ Ty + /// &/Box/ Ty Ptr(Box>, PtrTy<'a>), - // mod::mod::Type<[lifetime], [Params...]>, including a plain type - // parameter, and things like `int` + /// mod::mod::Type<[lifetime], [Params...]>, including a plain type + /// parameter, and things like `int` Literal(Path<'a>), - // includes nil + /// includes unit Tuple(Vec> ) } diff --git a/src/libsyntax/ext/deriving/show.rs b/src/libsyntax/ext/deriving/show.rs index 8e673ff246598..05b5131d7e4d3 100644 --- a/src/libsyntax/ext/deriving/show.rs +++ b/src/libsyntax/ext/deriving/show.rs @@ -55,8 +55,8 @@ pub fn expand_deriving_show(cx: &mut ExtCtxt, trait_def.expand(cx, mitem, item, push) } -// we construct a format string and then defer to std::fmt, since that -// knows what's up with formatting at so on. +/// We construct a format string and then defer to std::fmt, since that +/// knows what's up with formatting and so on. fn show_substructure(cx: &mut ExtCtxt, span: Span, substr: &Substructure) -> Gc { // build ``, `({}, {}, ...)` or ` { : {}, diff --git a/src/libsyntax/ext/expand.rs b/src/libsyntax/ext/expand.rs index fb2de2e271a99..996775885327a 100644 --- a/src/libsyntax/ext/expand.rs +++ b/src/libsyntax/ext/expand.rs @@ -233,11 +233,11 @@ pub fn expand_expr(e: Gc, fld: &mut MacroExpander) -> Gc { } } -// Rename loop label and expand its loop body -// -// The renaming procedure for loop is different in the sense that the loop -// body is in a block enclosed by loop head so the renaming of loop label -// must be propagated to the enclosed context. +/// Rename loop label and expand its loop body +/// +/// The renaming procedure for loop is different in the sense that the loop +/// body is in a block enclosed by loop head so the renaming of loop label +/// must be propagated to the enclosed context. fn expand_loop_block(loop_block: P, opt_ident: Option, fld: &mut MacroExpander) -> (P, Option) { diff --git a/src/libsyntax/ext/format.rs b/src/libsyntax/ext/format.rs index 857eadfe57cc4..9891f0e7b6f4a 100644 --- a/src/libsyntax/ext/format.rs +++ b/src/libsyntax/ext/format.rs @@ -38,24 +38,24 @@ struct Context<'a, 'b> { ecx: &'a mut ExtCtxt<'b>, fmtsp: Span, - // Parsed argument expressions and the types that we've found so far for - // them. + /// Parsed argument expressions and the types that we've found so far for + /// them. args: Vec>, arg_types: Vec>, - // Parsed named expressions and the types that we've found for them so far. - // Note that we keep a side-array of the ordering of the named arguments - // found to be sure that we can translate them in the same order that they - // were declared in. + /// Parsed named expressions and the types that we've found for them so far. + /// Note that we keep a side-array of the ordering of the named arguments + /// found to be sure that we can translate them in the same order that they + /// were declared in. 
names: HashMap>, name_types: HashMap, name_ordering: Vec, - // Collection of the compiled `rt::Piece` structures + /// Collection of the compiled `rt::Piece` structures pieces: Vec>, name_positions: HashMap, method_statics: Vec>, - // Updated as arguments are consumed or methods are entered + /// Updated as arguments are consumed or methods are entered nest_level: uint, next_arg: uint, } diff --git a/src/libsyntax/ext/mtwt.rs b/src/libsyntax/ext/mtwt.rs index 6c97a8aed1f55..8d2e970193cc6 100644 --- a/src/libsyntax/ext/mtwt.rs +++ b/src/libsyntax/ext/mtwt.rs @@ -21,15 +21,15 @@ use std::cell::RefCell; use std::rc::Rc; use std::collections::HashMap; -// the SCTable contains a table of SyntaxContext_'s. It -// represents a flattened tree structure, to avoid having -// managed pointers everywhere (that caused an ICE). -// the mark_memo and rename_memo fields are side-tables -// that ensure that adding the same mark to the same context -// gives you back the same context as before. This shouldn't -// change the semantics--everything here is immutable--but -// it should cut down on memory use *a lot*; applying a mark -// to a tree containing 50 identifiers would otherwise generate +/// the SCTable contains a table of SyntaxContext_'s. It +/// represents a flattened tree structure, to avoid having +/// managed pointers everywhere (that caused an ICE). +/// the mark_memo and rename_memo fields are side-tables +/// that ensure that adding the same mark to the same context +/// gives you back the same context as before. This shouldn't +/// change the semantics--everything here is immutable--but +/// it should cut down on memory use *a lot*; applying a mark +/// to a tree containing 50 identifiers would otherwise generate pub struct SCTable { table: RefCell>, mark_memo: RefCell>, @@ -40,16 +40,16 @@ pub struct SCTable { pub enum SyntaxContext_ { EmptyCtxt, Mark (Mrk,SyntaxContext), - // flattening the name and syntaxcontext into the rename... - // HIDDEN INVARIANTS: - // 1) the first name in a Rename node - // can only be a programmer-supplied name. - // 2) Every Rename node with a given Name in the - // "to" slot must have the same name and context - // in the "from" slot. In essence, they're all - // pointers to a single "rename" event node. + /// flattening the name and syntaxcontext into the rename... + /// HIDDEN INVARIANTS: + /// 1) the first name in a Rename node + /// can only be a programmer-supplied name. + /// 2) Every Rename node with a given Name in the + /// "to" slot must have the same name and context + /// in the "from" slot. In essence, they're all + /// pointers to a single "rename" event node. Rename (Ident,Name,SyntaxContext), - // actually, IllegalCtxt may not be necessary. + /// actually, IllegalCtxt may not be necessary. 
IllegalCtxt } @@ -58,7 +58,7 @@ pub fn new_mark(m: Mrk, tail: SyntaxContext) -> SyntaxContext { with_sctable(|table| new_mark_internal(m, tail, table)) } -// Extend a syntax context with a given mark and table +/// Extend a syntax context with a given mark and table fn new_mark_internal(m: Mrk, tail: SyntaxContext, table: &SCTable) -> SyntaxContext { let key = (tail, m); let new_ctxt = |_: &(SyntaxContext, Mrk)| @@ -73,7 +73,7 @@ pub fn new_rename(id: Ident, to:Name, with_sctable(|table| new_rename_internal(id, to, tail, table)) } -// Extend a syntax context with a given rename and sctable +/// Extend a syntax context with a given rename and sctable fn new_rename_internal(id: Ident, to: Name, tail: SyntaxContext, @@ -127,7 +127,7 @@ pub fn clear_tables() { with_resolve_table_mut(|table| *table = HashMap::new()); } -// Add a value to the end of a vec, return its index +/// Add a value to the end of a vec, return its index fn idx_push(vec: &mut Vec , val: T) -> u32 { vec.push(val); (vec.len() - 1) as u32 @@ -159,8 +159,8 @@ fn with_resolve_table_mut(op: |&mut ResolveTable| -> T) -> T { } } -// Resolve a syntax object to a name, per MTWT. -// adding memorization to possibly resolve 500+ seconds in resolve for librustc (!) +/// Resolve a syntax object to a name, per MTWT. +/// adding memoization to possibly save 500+ seconds in resolve for librustc (!) fn resolve_internal(id: Ident, table: &SCTable, resolve_table: &mut ResolveTable) -> Name { @@ -250,8 +250,8 @@ pub fn outer_mark(ctxt: SyntaxContext) -> Mrk { }) } -// Push a name... unless it matches the one on top, in which -// case pop and discard (so two of the same marks cancel) +/// Push a mark... unless it matches the one on top, in which +/// case pop and discard (so two of the same marks cancel) fn xor_push(marks: &mut Vec, mark: Mrk) { if (marks.len() > 0) && (*marks.last().unwrap() == mark) { marks.pop().unwrap(); diff --git a/src/libsyntax/ext/source_util.rs b/src/libsyntax/ext/source_util.rs index 915fc16c15660..4c28c285eb561 100644 --- a/src/libsyntax/ext/source_util.rs +++ b/src/libsyntax/ext/source_util.rs @@ -28,7 +28,7 @@ use std::str; // the column/row/filename of the expression, or they include // a given file into the current one. -/* line!(): expands to the current line number */ +/// line!(): expands to the current line number pub fn expand_line(cx: &mut ExtCtxt, sp: Span, tts: &[ast::TokenTree]) -> Box { base::check_zero_tts(cx, sp, tts, "line!"); @@ -49,9 +49,9 @@ pub fn expand_col(cx: &mut ExtCtxt, sp: Span, tts: &[ast::TokenTree]) base::MacExpr::new(cx.expr_uint(topmost.call_site, loc.col.to_uint())) } -/* file!(): expands to the current filename */ -/* The filemap (`loc.file`) contains a bunch more information we could spit - * out if we wanted. */ +/// file!(): expands to the current filename +/// The filemap (`loc.file`) contains a bunch more information we could spit +/// out if we wanted. pub fn expand_file(cx: &mut ExtCtxt, sp: Span, tts: &[ast::TokenTree]) -> Box { base::check_zero_tts(cx, sp, tts, "file!"); @@ -82,9 +82,9 @@ pub fn expand_mod(cx: &mut ExtCtxt, sp: Span, tts: &[ast::TokenTree]) token::intern_and_get_ident(string.as_slice()))) } -// include! : parse the given file as an expr -// This is generally a bad idea because it's going to behave -// unhygienically. +/// include! : parse the given file as an expr +/// This is generally a bad idea because it's going to behave +/// unhygienically.
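For intuition, here is a small self-contained sketch (written in current Rust rather than the pre-1.0 dialect of this patch, with simplified stand-in types) of the two mtwt mechanisms documented above: the memo table that makes applying the same mark to the same context hand back the same context, and xor_push's cancellation of adjacent identical marks.

use std::collections::HashMap;

type SyntaxContext = u32;
type Mrk = u32;

// Flattened tree plus memo table: (tail, mark) maps to at most one node,
// so repeated applications allocate nothing new.
struct SCTable {
    table: Vec<(Mrk, SyntaxContext)>,
    mark_memo: HashMap<(SyntaxContext, Mrk), SyntaxContext>,
}

impl SCTable {
    fn new_mark(&mut self, m: Mrk, tail: SyntaxContext) -> SyntaxContext {
        let table = &mut self.table;
        *self.mark_memo.entry((tail, m)).or_insert_with(|| {
            table.push((m, tail));
            (table.len() - 1) as SyntaxContext
        })
    }
}

// Two identical adjacent marks cancel; anything else is pushed.
fn xor_push(marks: &mut Vec<Mrk>, mark: Mrk) {
    if marks.last() == Some(&mark) {
        marks.pop();
    } else {
        marks.push(mark);
    }
}

fn main() {
    let mut t = SCTable { table: vec![(0, 0)], mark_memo: HashMap::new() };
    assert_eq!(t.new_mark(5, 0), t.new_mark(5, 0)); // memoized: same context back
    assert_eq!(t.table.len(), 2);                   // only one node was allocated

    let mut marks = Vec::new();
    xor_push(&mut marks, 7);
    xor_push(&mut marks, 7); // cancels the first 7
    xor_push(&mut marks, 9);
    assert_eq!(marks, vec![9]);
}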
pub fn expand_include(cx: &mut ExtCtxt, sp: Span, tts: &[ast::TokenTree]) -> Box { let file = match get_single_str_from_tts(cx, sp, tts, "include!") { diff --git a/src/libsyntax/ext/tt/macro_parser.rs b/src/libsyntax/ext/tt/macro_parser.rs index 86fbc8cec2a34..27babe4730e04 100644 --- a/src/libsyntax/ext/tt/macro_parser.rs +++ b/src/libsyntax/ext/tt/macro_parser.rs @@ -8,7 +8,72 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -// Earley-like parser for macros. +//! This is an Earley-like parser, without support for in-grammar nonterminals, +//! only by calling out to the main Rust parser for named nonterminals (which it +//! commits to fully when it hits one in a grammar). This means that there are no +//! completer or predictor rules, and therefore no need to store one column per +//! token: instead, there's a set of current Earley items and a set of next +//! ones. Instead of NTs, we have a special case for Kleene star. The big-O, in +//! pathological cases, is worse than traditional Earley parsing, but it's an +//! easier fit for Macro-by-Example-style rules, and I think the overhead is +//! lower. (In order to prevent the pathological case, we'd need to lazily +//! construct the resulting `NamedMatch`es at the very end. It'd be a pain, +//! and require more memory to keep around old items, but it would also save +//! overhead) +//! +//! Quick intro to how the parser works: +//! +//! A 'position' is a dot in the middle of a matcher, usually represented as a +//! dot. For example `· a $( a )* a b` is a position, as is `a $( · a )* a b`. +//! +//! The parser walks through the input a character at a time, maintaining a list +//! of items consistent with the current position in the input string: `cur_eis`. +//! +//! As it processes them, it fills up `eof_eis` with items that would be valid if +//! the macro invocation is now over, `bb_eis` with items that are waiting on +//! a Rust nonterminal like `$e:expr`, and `next_eis` with items that are waiting +//! on a particular token. Most of the logic concerns moving the · through the +//! repetitions indicated by Kleene stars. It only advances or calls out to the +//! real Rust parser when no `cur_eis` items remain. +//! +//! Example: Start parsing `a a a a b` against [· a $( a )* a b]. +//! +//! Remaining input: `a a a a b` +//! next_eis: [· a $( a )* a b] +//! +//! - - - Advance over an `a`. - - - +//! +//! Remaining input: `a a a b` +//! cur: [a · $( a )* a b] +//! Descend/Skip (first item). +//! next: [a $( · a )* a b] [a $( a )* · a b]. +//! +//! - - - Advance over an `a`. - - - +//! +//! Remaining input: `a a b` +//! cur: [a $( a · )* a b] next: [a $( a )* a · b] +//! Finish/Repeat (first item) +//! next: [a $( a )* · a b] [a $( · a )* a b] [a $( a )* a · b] +//! +//! - - - Advance over an `a`. - - - (this looks exactly like the last step) +//! +//! Remaining input: `a b` +//! cur: [a $( a · )* a b] next: [a $( a )* a · b] +//! Finish/Repeat (first item) +//! next: [a $( a )* · a b] [a $( · a )* a b] [a $( a )* a · b] +//! +//! - - - Advance over an `a`. - - - (this looks exactly like the last step) +//! +//! Remaining input: `b` +//! cur: [a $( a · )* a b] next: [a $( a )* a · b] +//! Finish/Repeat (first item) +//! next: [a $( a )* · a b] [a $( · a )* a b] +//! +//! - - - Advance over a `b`. - - - +//! +//! Remaining input: `` +//! 
eof: [a $( a )* a b ·] + use ast; use ast::{Matcher, MatchTok, MatchSeq, MatchNonterminal, Ident}; @@ -25,75 +90,6 @@ use std::rc::Rc; use std::gc::GC; use std::collections::HashMap; -/* This is an Earley-like parser, without support for in-grammar nonterminals, -only by calling out to the main rust parser for named nonterminals (which it -commits to fully when it hits one in a grammar). This means that there are no -completer or predictor rules, and therefore no need to store one column per -token: instead, there's a set of current Earley items and a set of next -ones. Instead of NTs, we have a special case for Kleene star. The big-O, in -pathological cases, is worse than traditional Earley parsing, but it's an -easier fit for Macro-by-Example-style rules, and I think the overhead is -lower. (In order to prevent the pathological case, we'd need to lazily -construct the resulting `NamedMatch`es at the very end. It'd be a pain, -and require more memory to keep around old items, but it would also save -overhead)*/ - -/* Quick intro to how the parser works: - -A 'position' is a dot in the middle of a matcher, usually represented as a -dot. For example `· a $( a )* a b` is a position, as is `a $( · a )* a b`. - -The parser walks through the input a character at a time, maintaining a list -of items consistent with the current position in the input string: `cur_eis`. - -As it processes them, it fills up `eof_eis` with items that would be valid if -the macro invocation is now over, `bb_eis` with items that are waiting on -a Rust nonterminal like `$e:expr`, and `next_eis` with items that are waiting -on the a particular token. Most of the logic concerns moving the · through the -repetitions indicated by Kleene stars. It only advances or calls out to the -real Rust parser when no `cur_eis` items remain - -Example: Start parsing `a a a a b` against [· a $( a )* a b]. - -Remaining input: `a a a a b` -next_eis: [· a $( a )* a b] - -- - - Advance over an `a`. - - - - -Remaining input: `a a a b` -cur: [a · $( a )* a b] -Descend/Skip (first item). -next: [a $( · a )* a b] [a $( a )* · a b]. - -- - - Advance over an `a`. - - - - -Remaining input: `a a b` -cur: [a $( a · )* a b] next: [a $( a )* a · b] -Finish/Repeat (first item) -next: [a $( a )* · a b] [a $( · a )* a b] [a $( a )* a · b] - -- - - Advance over an `a`. - - - (this looks exactly like the last step) - -Remaining input: `a b` -cur: [a $( a · )* a b] next: [a $( a )* a · b] -Finish/Repeat (first item) -next: [a $( a )* · a b] [a $( · a )* a b] [a $( a )* a · b] - -- - - Advance over an `a`. - - - (this looks exactly like the last step) - -Remaining input: `b` -cur: [a $( a · )* a b] next: [a $( a )* a · b] -Finish/Repeat (first item) -next: [a $( a )* · a b] [a $( · a )* a b] - -- - - Advance over a `b`. - - - - -Remaining input: `` -eof: [a $( a )* a b ·] - - */ - - /* to avoid costly uniqueness checks, we require that `MatchSeq` always has a nonempty body. */ @@ -147,24 +143,24 @@ pub fn initial_matcher_pos(ms: Vec , sep: Option, lo: BytePos) } } -// NamedMatch is a pattern-match result for a single ast::MatchNonterminal: -// so it is associated with a single ident in a parse, and all -// MatchedNonterminal's in the NamedMatch have the same nonterminal type -// (expr, item, etc). All the leaves in a single NamedMatch correspond to a -// single matcher_nonterminal in the ast::Matcher that produced it. 
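To make the item-set walkthrough above concrete, this sketch (current Rust, illustrative only) hand-compiles the example matcher `a $( a )* a b` into dot positions and advances a set of items over the input the way `cur_eis`/`next_eis` are advanced; the real parser derives these moves from an arbitrary Matcher rather than a fixed table.

// Dot positions for `a $( a )* a b`:
// 0 = before the leading `a`, 1 = just after it, 2 = before the `a`
// inside the star, 3 = before the trailing `a`, 4 = before `b`, 5 = accept.
fn closure(state: usize) -> Vec<usize> {
    // From 1 we either descend into the repetition (2) or skip it (3):
    // the Descend/Skip step in the trace above.
    match state {
        1 => vec![2, 3],
        s => vec![s],
    }
}

fn advance(cur: &[usize], tok: char) -> Vec<usize> {
    let mut next = Vec::new();
    for &s in cur {
        let moved = match (s, tok) {
            (0, 'a') => Some(1), // consume the leading `a`
            (2, 'a') => Some(1), // Finish/Repeat: loop around or leave the star
            (3, 'a') => Some(4), // consume the trailing `a`
            (4, 'b') => Some(5), // consume `b`: accept
            _ => None,
        };
        if let Some(t) = moved {
            for c in closure(t) {
                if !next.contains(&c) {
                    next.push(c);
                }
            }
        }
    }
    next
}

fn main() {
    let mut cur = closure(0);
    for tok in "aaaab".chars() {
        cur = advance(&cur, tok);
    }
    assert!(cur.contains(&5)); // `a a a a b` matches `a $( a )* a b`
}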
-// -// It should probably be renamed, it has more or less exact correspondence to -// ast::match nodes, and the in-memory structure of a particular NamedMatch -// represents the match that occurred when a particular subset of an -// ast::match -- those ast::Matcher nodes leading to a single -// MatchNonterminal -- was applied to a particular token tree. -// -// The width of each MatchedSeq in the NamedMatch, and the identity of the -// MatchedNonterminal's, will depend on the token tree it was applied to: each -// MatchedSeq corresponds to a single MatchSeq in the originating -// ast::Matcher. The depth of the NamedMatch structure will therefore depend -// only on the nesting depth of ast::MatchSeq's in the originating -// ast::Matcher it was derived from. +/// NamedMatch is a pattern-match result for a single ast::MatchNonterminal: +/// so it is associated with a single ident in a parse, and all +/// MatchedNonterminal's in the NamedMatch have the same nonterminal type +/// (expr, item, etc). All the leaves in a single NamedMatch correspond to a +/// single matcher_nonterminal in the ast::Matcher that produced it. +/// +/// It should probably be renamed; it has more or less exact correspondence to +/// ast::match nodes, and the in-memory structure of a particular NamedMatch +/// represents the match that occurred when a particular subset of an +/// ast::match -- those ast::Matcher nodes leading to a single +/// MatchNonterminal -- was applied to a particular token tree. +/// +/// The width of each MatchedSeq in the NamedMatch, and the identity of the +/// MatchedNonterminal's, will depend on the token tree it was applied to: each +/// MatchedSeq corresponds to a single MatchSeq in the originating +/// ast::Matcher. The depth of the NamedMatch structure will therefore depend +/// only on the nesting depth of ast::MatchSeq's in the originating +/// ast::Matcher it was derived from. pub enum NamedMatch { MatchedSeq(Vec>, codemap::Span), @@ -224,7 +220,8 @@ pub fn parse_or_else(sess: &ParseSess, } } -// perform a token equality check, ignoring syntax context (that is, an unhygienic comparison) +/// Perform a token equality check, ignoring syntax context (that is, an +/// unhygienic comparison) pub fn token_name_eq(t1 : &Token, t2 : &Token) -> bool { match (t1,t2) { (&token::IDENT(id1,_),&token::IDENT(id2,_)) diff --git a/src/libsyntax/ext/tt/macro_rules.rs b/src/libsyntax/ext/tt/macro_rules.rs index 72c578b87699c..ae396d5c53f49 100644 --- a/src/libsyntax/ext/tt/macro_rules.rs +++ b/src/libsyntax/ext/tt/macro_rules.rs @@ -119,7 +119,7 @@ impl MacResult for MacroRulesDefiner { } } -// Given `lhses` and `rhses`, this is the new macro we create +/// Given `lhses` and `rhses`, this is the new macro we create fn generic_extension(cx: &ExtCtxt, sp: Span, name: Ident, @@ -193,9 +193,9 @@ fn generic_extension(cx: &ExtCtxt, cx.span_fatal(best_fail_spot, best_fail_msg.as_slice()); } -// this procedure performs the expansion of the -// macro_rules! macro. It parses the RHS and adds -// an extension to the current context. +/// This procedure performs the expansion of the +/// macro_rules! macro. It parses the RHS and adds +/// an extension to the current context.
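A toy picture (current Rust, hypothetical Ident type) of the unhygienic comparison token_name_eq performs: identifiers minted by different expansions differ under ordinary equality because their syntax contexts differ, yet compare equal by name.

#[derive(PartialEq)]
struct Ident { name: u32, ctxt: u32 }

// Compare by name only, ignoring the syntax context.
fn token_name_eq(a: &Ident, b: &Ident) -> bool {
    a.name == b.name
}

fn main() {
    let user = Ident { name: 42, ctxt: 0 };     // written directly by the user
    let expanded = Ident { name: 42, ctxt: 7 }; // same name, minted by a macro
    assert!(user != expanded);                  // hygienic equality: different
    assert!(token_name_eq(&user, &expanded));   // unhygienic equality: equal
}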
pub fn add_new_extension(cx: &mut ExtCtxt, sp: Span, name: Ident, diff --git a/src/libsyntax/ext/tt/transcribe.rs b/src/libsyntax/ext/tt/transcribe.rs index c0c066fe4668b..726a7315f6991 100644 --- a/src/libsyntax/ext/tt/transcribe.rs +++ b/src/libsyntax/ext/tt/transcribe.rs @@ -32,7 +32,7 @@ struct TtFrame { #[deriving(Clone)] pub struct TtReader<'a> { pub sp_diag: &'a SpanHandler, - // the unzipped tree: + /// the unzipped tree: stack: Vec, /* for MBE-style macro transcription */ interpolations: HashMap>, @@ -43,9 +43,9 @@ pub struct TtReader<'a> { pub cur_span: Span, } -/** This can do Macro-By-Example transcription. On the other hand, if - * `src` contains no `TTSeq`s and `TTNonterminal`s, `interp` can (and - * should) be none. */ +/// This can do Macro-By-Example transcription. On the other hand, if +/// `src` contains no `TTSeq`s and `TTNonterminal`s, `interp` can (and +/// should) be none. pub fn new_tt_reader<'a>(sp_diag: &'a SpanHandler, interp: Option>>, src: Vec ) @@ -138,8 +138,8 @@ fn lockstep_iter_size(t: &TokenTree, r: &TtReader) -> LockstepIterSize { } } -// return the next token from the TtReader. -// EFFECT: advances the reader's token field +/// Return the next token from the TtReader. +/// EFFECT: advances the reader's token field pub fn tt_next_token(r: &mut TtReader) -> TokenAndSpan { // FIXME(pcwalton): Bad copy? let ret_val = TokenAndSpan { diff --git a/src/libsyntax/lib.rs b/src/libsyntax/lib.rs index 0d6821fb30c08..69638f9c1a429 100644 --- a/src/libsyntax/lib.rs +++ b/src/libsyntax/lib.rs @@ -8,15 +8,11 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! - -The Rust parser and macro expander. - -# Note - -This API is completely unstable and subject to change. - -*/ +//! The Rust parser and macro expander. +//! +//! # Note +//! +//! This API is completely unstable and subject to change. #![crate_id = "syntax#0.11.0-pre"] #![experimental] diff --git a/src/libsyntax/parse/attr.rs b/src/libsyntax/parse/attr.rs index e47080dadfd72..1ca7a2df06037 100644 --- a/src/libsyntax/parse/attr.rs +++ b/src/libsyntax/parse/attr.rs @@ -18,7 +18,7 @@ use parse::token::INTERPOLATED; use std::gc::{Gc, GC}; -// a parser that can parse attributes. +/// A parser that can parse attributes. pub trait ParserAttr { fn parse_outer_attributes(&mut self) -> Vec; fn parse_attribute(&mut self, permit_inner: bool) -> ast::Attribute; @@ -30,7 +30,7 @@ pub trait ParserAttr { } impl<'a> ParserAttr for Parser<'a> { - // Parse attributes that appear before an item + /// Parse attributes that appear before an item fn parse_outer_attributes(&mut self) -> Vec { let mut attrs: Vec = Vec::new(); loop { @@ -59,10 +59,10 @@ impl<'a> ParserAttr for Parser<'a> { return attrs; } - // matches attribute = # ! [ meta_item ] - // - // if permit_inner is true, then a leading `!` indicates an inner - // attribute + /// Matches `attribute = # ! [ meta_item ]` + /// + /// If permit_inner is true, then a leading `!` indicates an inner + /// attribute fn parse_attribute(&mut self, permit_inner: bool) -> ast::Attribute { debug!("parse_attributes: permit_inner={:?} self.token={:?}", permit_inner, self.token); @@ -114,17 +114,17 @@ impl<'a> ParserAttr for Parser<'a> { }; } - // Parse attributes that appear after the opening of an item. These should - // be preceded by an exclamation mark, but we accept and warn about one - // terminated by a semicolon. 
In addition to a vector of inner attributes, - // this function also returns a vector that may contain the first outer - // attribute of the next item (since we can't know whether the attribute - // is an inner attribute of the containing item or an outer attribute of - // the first contained item until we see the semi). - - // matches inner_attrs* outer_attr? - // you can make the 'next' field an Option, but the result is going to be - // more useful as a vector. + /// Parse attributes that appear after the opening of an item. These should + /// be preceded by an exclamation mark, but we accept and warn about one + /// terminated by a semicolon. In addition to a vector of inner attributes, + /// this function also returns a vector that may contain the first outer + /// attribute of the next item (since we can't know whether the attribute + /// is an inner attribute of the containing item or an outer attribute of + /// the first contained item until we see the semi). + + /// matches inner_attrs* outer_attr? + /// you can make the 'next' field an Option, but the result is going to be + /// more useful as a vector. fn parse_inner_attrs_and_next(&mut self) -> (Vec , Vec ) { let mut inner_attrs: Vec = Vec::new(); @@ -157,9 +157,9 @@ impl<'a> ParserAttr for Parser<'a> { (inner_attrs, next_outer_attrs) } - // matches meta_item = IDENT - // | IDENT = lit - // | IDENT meta_seq + /// matches meta_item = IDENT + /// | IDENT = lit + /// | IDENT meta_seq fn parse_meta_item(&mut self) -> Gc { match self.token { token::INTERPOLATED(token::NtMeta(e)) => { @@ -201,7 +201,7 @@ impl<'a> ParserAttr for Parser<'a> { } } - // matches meta_seq = ( COMMASEP(meta_item) ) + /// matches meta_seq = ( COMMASEP(meta_item) ) fn parse_meta_seq(&mut self) -> Vec> { self.parse_seq(&token::LPAREN, &token::RPAREN, diff --git a/src/libsyntax/parse/classify.rs b/src/libsyntax/parse/classify.rs index 8d9cc305c26e8..516f22cdf4d60 100644 --- a/src/libsyntax/parse/classify.rs +++ b/src/libsyntax/parse/classify.rs @@ -15,13 +15,13 @@ use ast; use std::gc::Gc; -// does this expression require a semicolon to be treated -// as a statement? The negation of this: 'can this expression -// be used as a statement without a semicolon' -- is used -// as an early-bail-out in the parser so that, for instance, -// 'if true {...} else {...} -// |x| 5 ' -// isn't parsed as (if true {...} else {...} | x) | 5 +/// Does this expression require a semicolon to be treated +/// as a statement? The negation of this: 'can this expression +/// be used as a statement without a semicolon' -- is used +/// as an early-bail-out in the parser so that, for instance, +/// if true {...} else {...} +/// |x| 5 +/// isn't parsed as (if true {...} else {...} | x) | 5 pub fn expr_requires_semi_to_be_stmt(e: Gc) -> bool { match e.node { ast::ExprIf(..) @@ -41,9 +41,9 @@ pub fn expr_is_simple_block(e: Gc) -> bool { } } -// this statement requires a semicolon after it. -// note that in one case (stmt_semi), we've already -// seen the semicolon, and thus don't need another. +/// this statement requires a semicolon after it. +/// note that in one case (stmt_semi), we've already +/// seen the semicolon, and thus don't need another. 
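For reference, the meta_item shapes and the inner/outer distinction from the attribute grammar above, written in today's surface syntax (`derive` where this era wrote `deriving`):

#![allow(dead_code)]        // inner attribute: note the leading `!`

#[derive(Clone)]            // meta_item = IDENT meta_seq
struct S;

#[doc = "IDENT = lit form"] // meta_item = IDENT = lit
fn documented() {}

#[inline]                   // meta_item = bare IDENT
fn main() {
    documented();
    let _ = S.clone();
}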
pub fn stmt_ends_with_semi(stmt: &ast::Stmt) -> bool { return match stmt.node { ast::StmtDecl(d, _) => { diff --git a/src/libsyntax/parse/common.rs b/src/libsyntax/parse/common.rs index 3c3f0c7a82044..3842170d67777 100644 --- a/src/libsyntax/parse/common.rs +++ b/src/libsyntax/parse/common.rs @@ -12,8 +12,8 @@ use parse::token; -// SeqSep : a sequence separator (token) -// and whether a trailing separator is allowed. +/// SeqSep : a sequence separator (token) +/// and whether a trailing separator is allowed. pub struct SeqSep { pub sep: Option, pub trailing_sep_allowed: bool } diff --git a/src/libsyntax/parse/lexer/comments.rs b/src/libsyntax/parse/lexer/comments.rs index a009955f91a7b..0f434433a2c41 100644 --- a/src/libsyntax/parse/lexer/comments.rs +++ b/src/libsyntax/parse/lexer/comments.rs @@ -24,10 +24,14 @@ use std::uint; #[deriving(Clone, PartialEq)] pub enum CommentStyle { - Isolated, // No code on either side of each line of the comment - Trailing, // Code exists to the left of the comment - Mixed, // Code before /* foo */ and after the comment - BlankLine, // Just a manual blank line "\n\n", for layout + /// No code on either side of each line of the comment + Isolated, + /// Code exists to the left of the comment + Trailing, + /// Code before /* foo */ and after the comment + Mixed, + /// Just a manual blank line "\n\n", for layout + BlankLine, } #[deriving(Clone)] @@ -198,9 +202,9 @@ fn read_line_comments(rdr: &mut StringReader, code_to_the_left: bool, } } -// Returns None if the first col chars of s contain a non-whitespace char. -// Otherwise returns Some(k) where k is first char offset after that leading -// whitespace. Note k may be outside bounds of s. +/// Returns None if the first col chars of s contain a non-whitespace char. +/// Otherwise returns Some(k) where k is first char offset after that leading +/// whitespace. Note k may be outside bounds of s. fn all_whitespace(s: &str, col: CharPos) -> Option { let len = s.len(); let mut col = col.to_uint(); diff --git a/src/libsyntax/parse/lexer/mod.rs b/src/libsyntax/parse/lexer/mod.rs index add9a4cb9f3bd..276fa7ddf1475 100644 --- a/src/libsyntax/parse/lexer/mod.rs +++ b/src/libsyntax/parse/lexer/mod.rs @@ -44,13 +44,13 @@ pub struct TokenAndSpan { pub struct StringReader<'a> { pub span_diagnostic: &'a SpanHandler, - // The absolute offset within the codemap of the next character to read + /// The absolute offset within the codemap of the next character to read pub pos: BytePos, - // The absolute offset within the codemap of the last character read(curr) + /// The absolute offset within the codemap of the last character read (curr) pub last_pos: BytePos, - // The column of the next character to read + /// The column of the next character to read pub col: CharPos, - // The last character to be read + /// The last character to be read pub curr: Option, pub filemap: Rc, /* cached: */ @@ -60,7 +60,7 @@ pub struct StringReader<'a> { impl<'a> Reader for StringReader<'a> { fn is_eof(&self) -> bool { self.curr.is_none() } - // return the next token. EFFECT: advances the string_reader. + /// Return the next token. EFFECT: advances the string_reader. fn next_token(&mut self) -> TokenAndSpan { let ret_val = TokenAndSpan { tok: replace(&mut self.peek_tok, token::UNDERSCORE), @@ -90,7 +90,7 @@ impl<'a> Reader for TtReader<'a> { } fn next_token(&mut self) -> TokenAndSpan { let r = tt_next_token(self); - debug!("TtReader: r={:?}", r); + debug!("TtReader: r={}", r); r } fn fatal(&self, m: &str) -> !
{ @@ -417,7 +417,7 @@ impl<'a> StringReader<'a> { return self.consume_any_line_comment(); } - // might return a sugared-doc-attr + /// Might return a sugared-doc-attr fn consume_block_comment(&mut self) -> Option { // block comments starting with "/**" or "/*!" are doc-comments let is_doc_comment = self.curr_is('*') || self.curr_is('!'); diff --git a/src/libsyntax/parse/mod.rs b/src/libsyntax/parse/mod.rs index 331a49c83beac..fb6c7bfdd411b 100644 --- a/src/libsyntax/parse/mod.rs +++ b/src/libsyntax/parse/mod.rs @@ -10,7 +10,6 @@ //! The main parser interface - use ast; use codemap::{Span, CodeMap, FileMap}; use diagnostic::{SpanHandler, mk_span_handler, default_handler, Auto}; @@ -32,7 +31,7 @@ pub mod common; pub mod classify; pub mod obsolete; -// info about a parsing session. +/// Info about a parsing session. pub struct ParseSess { pub span_diagnostic: SpanHandler, // better be the same as the one in the reader! /// Used to determine and report recursive mod inclusions @@ -241,14 +240,14 @@ pub fn file_to_filemap(sess: &ParseSess, path: &Path, spanopt: Option) unreachable!() } -// given a session and a string, add the string to -// the session's codemap and return the new filemap +/// Given a session and a string, add the string to +/// the session's codemap and return the new filemap pub fn string_to_filemap(sess: &ParseSess, source: String, path: String) -> Rc { sess.span_diagnostic.cm.new_filemap(path, source) } -// given a filemap, produce a sequence of token-trees +/// Given a filemap, produce a sequence of token-trees pub fn filemap_to_tts(sess: &ParseSess, filemap: Rc) -> Vec { // it appears to me that the cfg doesn't matter here... indeed, @@ -259,7 +258,7 @@ pub fn filemap_to_tts(sess: &ParseSess, filemap: Rc) p1.parse_all_token_trees() } -// given tts and cfg, produce a parser +/// Given tts and cfg, produce a parser pub fn tts_to_parser<'a>(sess: &'a ParseSess, tts: Vec, cfg: ast::CrateConfig) -> Parser<'a> { @@ -267,7 +266,7 @@ pub fn tts_to_parser<'a>(sess: &'a ParseSess, Parser::new(sess, cfg, box trdr) } -// abort if necessary +/// Abort if necessary pub fn maybe_aborted(result: T, mut p: Parser) -> T { p.abort_if_errors(); result diff --git a/src/libsyntax/parse/obsolete.rs b/src/libsyntax/parse/obsolete.rs index 025684ae71e8c..cadae7ef12f80 100644 --- a/src/libsyntax/parse/obsolete.rs +++ b/src/libsyntax/parse/obsolete.rs @@ -38,8 +38,8 @@ pub enum ObsoleteSyntax { pub trait ParserObsoleteMethods { /// Reports an obsolete syntax non-fatal error. 
fn obsolete(&mut self, sp: Span, kind: ObsoleteSyntax); - // Reports an obsolete syntax non-fatal error, and returns - // a placeholder expression + /// Reports an obsolete syntax non-fatal error, and returns + /// a placeholder expression fn obsolete_expr(&mut self, sp: Span, kind: ObsoleteSyntax) -> Gc; fn report(&mut self, sp: Span, @@ -83,8 +83,8 @@ impl<'a> ParserObsoleteMethods for parser::Parser<'a> { self.report(sp, kind, kind_str, desc); } - // Reports an obsolete syntax non-fatal error, and returns - // a placeholder expression + /// Reports an obsolete syntax non-fatal error, and returns + /// a placeholder expression fn obsolete_expr(&mut self, sp: Span, kind: ObsoleteSyntax) -> Gc { self.obsolete(sp, kind); self.mk_expr(sp.lo, sp.hi, ExprLit(box(GC) respan(sp, LitNil))) diff --git a/src/libsyntax/parse/parser.rs b/src/libsyntax/parse/parser.rs index e1319304e04e9..532d4d7d793c2 100644 --- a/src/libsyntax/parse/parser.rs +++ b/src/libsyntax/parse/parser.rs @@ -118,8 +118,8 @@ pub struct PathAndBounds { } enum ItemOrViewItem { - // Indicates a failure to parse any kind of item. The attributes are - // returned. + /// Indicates a failure to parse any kind of item. The attributes are + /// returned. IoviNone(Vec), IoviItem(Gc), IoviForeignItem(Gc), @@ -127,12 +127,12 @@ enum ItemOrViewItem { } -// Possibly accept an `INTERPOLATED` expression (a pre-parsed expression -// dropped into the token stream, which happens while parsing the -// result of macro expansion) -/* Placement of these is not as complex as I feared it would be. -The important thing is to make sure that lookahead doesn't balk -at INTERPOLATED tokens */ +/// Possibly accept an `INTERPOLATED` expression (a pre-parsed expression +/// dropped into the token stream, which happens while parsing the +/// result of macro expansion) +/// Placement of these is not as complex as I feared it would be. +/// The important thing is to make sure that lookahead doesn't balk +/// at INTERPOLATED tokens macro_rules! maybe_whole_expr ( ($p:expr) => ( { @@ -167,7 +167,7 @@ macro_rules! maybe_whole_expr ( ) ) -// As above, but for things other than expressions +/// As maybe_whole_expr, but for things other than expressions macro_rules! maybe_whole ( ($p:expr, $constructor:ident) => ( { @@ -288,14 +288,14 @@ struct ParsedItemsAndViewItems { pub struct Parser<'a> { pub sess: &'a ParseSess, - // the current token: + /// the current token: pub token: token::Token, - // the span of the current token: + /// the span of the current token: pub span: Span, - // the span of the prior token: + /// the span of the prior token: pub last_span: Span, pub cfg: CrateConfig, - // the previous token or None (only stashed sometimes). + /// the previous token or None (only stashed sometimes). pub last_token: Option>, pub buffer: [TokenAndSpan, ..4], pub buffer_start: int, @@ -362,12 +362,12 @@ impl<'a> Parser<'a> { root_module_name: None, } } - // convert a token to a string using self's reader + /// Convert a token to a string using self's reader pub fn token_to_str(token: &token::Token) -> String { token::to_str(token) } - // convert the current token to a string using self's reader + /// Convert the current token to a string using self's reader pub fn this_token_to_str(&mut self) -> String { Parser::token_to_str(&self.token) } @@ -384,8 +384,8 @@ impl<'a> Parser<'a> { self.fatal(format!("unexpected token: `{}`", this_token).as_slice()); } - // expect and consume the token t. Signal an error if - // the next token is not t. 
+ /// Expect and consume the token t. Signal an error if + /// the next token is not t. pub fn expect(&mut self, t: &token::Token) { if self.token == *t { self.bump(); @@ -398,9 +398,9 @@ impl<'a> Parser<'a> { } } - // Expect next token to be edible or inedible token. If edible, - // then consume it; if inedible, then return without consuming - // anything. Signal a fatal error if next token is unexpected. + /// Expect next token to be edible or inedible token. If edible, + /// then consume it; if inedible, then return without consuming + /// anything. Signal a fatal error if next token is unexpected. pub fn expect_one_of(&mut self, edible: &[token::Token], inedible: &[token::Token]) { @@ -438,9 +438,9 @@ impl<'a> Parser<'a> { } } - // Check for erroneous `ident { }`; if matches, signal error and - // recover (without consuming any expected input token). Returns - // true if and only if input was consumed for recovery. + /// Check for erroneous `ident { }`; if matches, signal error and + /// recover (without consuming any expected input token). Returns + /// true if and only if input was consumed for recovery. pub fn check_for_erroneous_unit_struct_expecting(&mut self, expected: &[token::Token]) -> bool { if self.token == token::LBRACE && expected.iter().all(|t| *t != token::LBRACE) @@ -457,9 +457,9 @@ impl<'a> Parser<'a> { } } - // Commit to parsing a complete expression `e` expected to be - // followed by some token from the set edible + inedible. Recover - // from anticipated input errors, discarding erroneous characters. + /// Commit to parsing a complete expression `e` expected to be + /// followed by some token from the set edible + inedible. Recover + /// from anticipated input errors, discarding erroneous characters. pub fn commit_expr(&mut self, e: Gc, edible: &[token::Token], inedible: &[token::Token]) { debug!("commit_expr {:?}", e); @@ -480,9 +480,9 @@ impl<'a> Parser<'a> { self.commit_expr(e, &[edible], &[]) } - // Commit to parsing a complete statement `s`, which expects to be - // followed by some token from the set edible + inedible. Check - // for recoverable input errors, discarding erroneous characters. + /// Commit to parsing a complete statement `s`, which expects to be + /// followed by some token from the set edible + inedible. Check + /// for recoverable input errors, discarding erroneous characters. pub fn commit_stmt(&mut self, s: Gc, edible: &[token::Token], inedible: &[token::Token]) { debug!("commit_stmt {:?}", s); @@ -527,8 +527,8 @@ impl<'a> Parser<'a> { id: ast::DUMMY_NODE_ID }) } - // consume token 'tok' if it exists. Returns true if the given - // token was present, false otherwise. + /// Consume token 'tok' if it exists. Returns true if the given + /// token was present, false otherwise. pub fn eat(&mut self, tok: &token::Token) -> bool { let is_present = self.token == *tok; if is_present { self.bump() } @@ -539,8 +539,8 @@ impl<'a> Parser<'a> { token::is_keyword(kw, &self.token) } - // if the next token is the given keyword, eat it and return - // true. Otherwise, return false. + /// If the next token is the given keyword, eat it and return + /// true. Otherwise, return false. pub fn eat_keyword(&mut self, kw: keywords::Keyword) -> bool { let is_kw = match self.token { token::IDENT(sid, false) => kw.to_ident().name == sid.name, @@ -550,9 +550,9 @@ impl<'a> Parser<'a> { is_kw } - // if the given word is not a keyword, signal an error. - // if the next token is not the given word, signal an error. - // otherwise, eat it. 
+ /// If the given word is not a keyword, signal an error. + /// If the next token is not the given word, signal an error. + /// Otherwise, eat it. pub fn expect_keyword(&mut self, kw: keywords::Keyword) { if !self.eat_keyword(kw) { let id_interned_str = token::get_ident(kw.to_ident()); @@ -562,7 +562,7 @@ impl<'a> Parser<'a> { } } - // signal an error if the given string is a strict keyword + /// Signal an error if the given string is a strict keyword pub fn check_strict_keywords(&mut self) { if token::is_strict_keyword(&self.token) { let token_str = self.this_token_to_str(); @@ -573,7 +573,7 @@ impl<'a> Parser<'a> { } } - // signal an error if the current token is a reserved keyword + /// Signal an error if the current token is a reserved keyword pub fn check_reserved_keywords(&mut self) { if token::is_reserved_keyword(&self.token) { let token_str = self.this_token_to_str(); @@ -582,8 +582,8 @@ impl<'a> Parser<'a> { } } - // Expect and consume an `&`. If `&&` is seen, replace it with a single - // `&` and continue. If an `&` is not seen, signal an error. + /// Expect and consume an `&`. If `&&` is seen, replace it with a single + /// `&` and continue. If an `&` is not seen, signal an error. fn expect_and(&mut self) { match self.token { token::BINOP(token::AND) => self.bump(), @@ -603,8 +603,8 @@ impl<'a> Parser<'a> { } } - // Expect and consume a `|`. If `||` is seen, replace it with a single - // `|` and continue. If a `|` is not seen, signal an error. + /// Expect and consume a `|`. If `||` is seen, replace it with a single + /// `|` and continue. If a `|` is not seen, signal an error. fn expect_or(&mut self) { match self.token { token::BINOP(token::OR) => self.bump(), @@ -624,26 +624,26 @@ impl<'a> Parser<'a> { } } - // Attempt to consume a `<`. If `<<` is seen, replace it with a single - // `<` and continue. If a `<` is not seen, return false. - // - // This is meant to be used when parsing generics on a path to get the - // starting token. The `force` parameter is used to forcefully break up a - // `<<` token. If `force` is false, then `<<` is only broken when a lifetime - // shows up next. For example, consider the expression: - // - // foo as bar << test - // - // The parser needs to know if `bar <<` is the start of a generic path or if - // it's a left-shift token. If `test` were a lifetime, then it's impossible - // for the token to be a left-shift, but if it's not a lifetime, then it's - // considered a left-shift. - // - // The reason for this is that the only current ambiguity with `<<` is when - // parsing closure types: - // - // foo::<<'a> ||>(); - // impl Foo<<'a> ||>() { ... } + /// Attempt to consume a `<`. If `<<` is seen, replace it with a single + /// `<` and continue. If a `<` is not seen, return false. + /// + /// This is meant to be used when parsing generics on a path to get the + /// starting token. The `force` parameter is used to forcefully break up a + /// `<<` token. If `force` is false, then `<<` is only broken when a lifetime + /// shows up next. For example, consider the expression: + /// + /// foo as bar << test + /// + /// The parser needs to know if `bar <<` is the start of a generic path or if + /// it's a left-shift token. If `test` were a lifetime, then it's impossible + /// for the token to be a left-shift, but if it's not a lifetime, then it's + /// considered a left-shift. + /// + /// The reason for this is that the only current ambiguity with `<<` is when + /// parsing closure types: + /// + /// foo::<<'a> ||>(); + /// impl Foo<<'a> ||>() { ... 
} fn eat_lt(&mut self, force: bool) -> bool { match self.token { token::LT => { self.bump(); true } @@ -675,7 +675,7 @@ impl<'a> Parser<'a> { } } - // Parse a sequence bracketed by `|` and `|`, stopping before the `|`. + /// Parse a sequence bracketed by `|` and `|`, stopping before the `|`. fn parse_seq_to_before_or( &mut self, sep: &token::Token, @@ -696,9 +696,9 @@ impl<'a> Parser<'a> { vector } - // expect and consume a GT. if a >> is seen, replace it - // with a single > and continue. If a GT is not seen, - // signal an error. + /// Expect and consume a GT. If a >> is seen, replace it + /// with a single > and continue. If a GT is not seen, + /// signal an error. pub fn expect_gt(&mut self) { match self.token { token::GT => self.bump(), @@ -727,8 +727,8 @@ impl<'a> Parser<'a> { } } - // parse a sequence bracketed by '<' and '>', stopping - // before the '>'. + /// Parse a sequence bracketed by '<' and '>', stopping + /// before the '>'. pub fn parse_seq_to_before_gt( &mut self, sep: Option, @@ -762,9 +762,9 @@ impl<'a> Parser<'a> { return v; } - // parse a sequence, including the closing delimiter. The function - // f must consume tokens until reaching the next separator or - // closing bracket. + /// Parse a sequence, including the closing delimiter. The function + /// f must consume tokens until reaching the next separator or + /// closing bracket. pub fn parse_seq_to_end( &mut self, ket: &token::Token, @@ -776,9 +776,9 @@ impl<'a> Parser<'a> { val } - // parse a sequence, not including the closing delimiter. The function - // f must consume tokens until reaching the next separator or - // closing bracket. + /// Parse a sequence, not including the closing delimiter. The function + /// f must consume tokens until reaching the next separator or + /// closing bracket. pub fn parse_seq_to_before_end( &mut self, ket: &token::Token, @@ -801,9 +801,9 @@ impl<'a> Parser<'a> { return v; } - // parse a sequence, including the closing delimiter. The function - // f must consume tokens until reaching the next separator or - // closing bracket. + /// Parse a sequence, including the closing delimiter. The function + /// f must consume tokens until reaching the next separator or + /// closing bracket. pub fn parse_unspanned_seq( &mut self, bra: &token::Token, @@ -817,8 +817,8 @@ impl<'a> Parser<'a> { result } - // parse a sequence parameter of enum variant. For consistency purposes, - // these should not be empty. + /// Parse a sequence parameter of enum variant. For consistency purposes, + /// these should not be empty. pub fn parse_enum_variant_seq( &mut self, bra: &token::Token, @@ -852,7 +852,7 @@ impl<'a> Parser<'a> { spanned(lo, hi, result) } - // advance the parser by one token + /// Advance the parser by one token pub fn bump(&mut self) { self.last_span = self.span; // Stash token for error recovery (sometimes; clone is not necessarily cheap). @@ -880,14 +880,14 @@ impl<'a> Parser<'a> { self.tokens_consumed += 1u; } - // Advance the parser by one token and return the bumped token. + /// Advance the parser by one token and return the bumped token.
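The eat/expect contract running through the helpers above, reduced to a toy parser (current Rust, hypothetical token set; the real methods report errors through the diagnostic session rather than panicking):

#[derive(PartialEq, Debug)]
enum Token { Comma, Semi, Eof }

struct Parser { toks: Vec<Token>, pos: usize }

impl Parser {
    fn token(&self) -> &Token { &self.toks[self.pos] }

    /// Advance by one token (saturating at the last one).
    fn bump(&mut self) {
        if self.pos + 1 < self.toks.len() { self.pos += 1; }
    }

    /// Consume `t` if it is the current token; report whether it was present.
    fn eat(&mut self, t: &Token) -> bool {
        let present = self.token() == t;
        if present { self.bump(); }
        present
    }

    /// Consume `t`, or signal an error if the current token differs.
    fn expect(&mut self, t: &Token) {
        if !self.eat(t) {
            panic!("expected `{:?}`, found `{:?}`", t, self.token());
        }
    }
}

fn main() {
    let mut p = Parser { toks: vec![Token::Comma, Token::Semi, Token::Eof], pos: 0 };
    assert!(p.eat(&Token::Comma));  // present: consumed
    assert!(!p.eat(&Token::Comma)); // absent: nothing consumed
    p.expect(&Token::Semi);         // consumed, or a fatal error
}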
pub fn bump_and_get(&mut self) -> token::Token { let old_token = replace(&mut self.token, token::UNDERSCORE); self.bump(); old_token } - // EFFECT: replace the current token and span with the given one + /// EFFECT: replace the current token and span with the given one pub fn replace_token(&mut self, next: token::Token, lo: BytePos, @@ -940,8 +940,8 @@ impl<'a> Parser<'a> { token::get_ident(id) } - // Is the current token one of the keywords that signals a bare function - // type? + /// Is the current token one of the keywords that signals a bare function + /// type? pub fn token_is_bare_fn_keyword(&mut self) -> bool { if token::is_keyword(keywords::Fn, &self.token) { return true @@ -955,14 +955,14 @@ impl<'a> Parser<'a> { false } - // Is the current token one of the keywords that signals a closure type? + /// Is the current token one of the keywords that signals a closure type? pub fn token_is_closure_keyword(&mut self) -> bool { token::is_keyword(keywords::Unsafe, &self.token) || token::is_keyword(keywords::Once, &self.token) } - // Is the current token one of the keywords that signals an old-style - // closure type (with explicit sigil)? + /// Is the current token one of the keywords that signals an old-style + /// closure type (with explicit sigil)? pub fn token_is_old_style_closure_keyword(&mut self) -> bool { token::is_keyword(keywords::Unsafe, &self.token) || token::is_keyword(keywords::Once, &self.token) || @@ -983,7 +983,7 @@ impl<'a> Parser<'a> { } } - // parse a TyBareFn type: + /// parse a TyBareFn type: pub fn parse_ty_bare_fn(&mut self) -> Ty_ { /* @@ -1014,8 +1014,8 @@ impl<'a> Parser<'a> { }); } - // Parses a procedure type (`proc`). The initial `proc` keyword must - // already have been parsed. + /// Parses a procedure type (`proc`). The initial `proc` keyword must + /// already have been parsed. pub fn parse_proc_type(&mut self) -> Ty_ { /* @@ -1063,7 +1063,7 @@ impl<'a> Parser<'a> { }) } - // parse a TyClosure type + /// Parse a TyClosure type pub fn parse_ty_closure(&mut self) -> Ty_ { /* @@ -1154,7 +1154,7 @@ impl<'a> Parser<'a> { } } - // parse a function type (following the 'fn') + /// Parse a function type (following the 'fn') pub fn parse_ty_fn_decl(&mut self, allow_variadic: bool) -> (P, Vec) { /* @@ -1186,7 +1186,7 @@ impl<'a> Parser<'a> { (decl, lifetimes) } - // parse the methods in a trait declaration + /// Parse the methods in a trait declaration pub fn parse_trait_methods(&mut self) -> Vec { self.parse_unspanned_seq( &token::LBRACE, @@ -1255,15 +1255,15 @@ impl<'a> Parser<'a> { }) } - // parse a possibly mutable type + /// Parse a possibly mutable type pub fn parse_mt(&mut self) -> MutTy { let mutbl = self.parse_mutability(); let t = self.parse_ty(true); MutTy { ty: t, mutbl: mutbl } } - // parse [mut/const/imm] ID : TY - // now used only by obsolete record syntax parser... + /// Parse [mut/const/imm] ID : TY + /// now used only by obsolete record syntax parser... pub fn parse_ty_field(&mut self) -> TypeField { let lo = self.span.lo; let mutbl = self.parse_mutability(); @@ -1278,7 +1278,7 @@ impl<'a> Parser<'a> { } } - // parse optional return type [ -> TY ] in function decl + /// Parse optional return type [ -> TY ] in function decl pub fn parse_ret_ty(&mut self) -> (RetStyle, P) { return if self.eat(&token::RARROW) { let lo = self.span.lo; @@ -1474,8 +1474,8 @@ impl<'a> Parser<'a> { } } - // This version of parse arg doesn't necessarily require - // identifier names. + /// This version of parse arg doesn't necessarily require + /// identifier names. 
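The SeqSep pair introduced in parse/common.rs (a separator token plus whether a trailing separator is allowed) drives the whole parse_seq_* family above. A toy version over chars (current Rust, illustrative only; digits stand in for parsed elements):

struct SeqSep { sep: Option<char>, trailing_sep_allowed: bool }

// Mirror of parse_seq_to_before_end: collect elements up to (not including)
// `ket`, enforcing the separator policy.
fn parse_seq(input: &str, ket: char, sep: SeqSep) -> Result<Vec<char>, String> {
    let mut v = Vec::new();
    let mut it = input.chars().peekable();
    let mut first = true;
    while let Some(&c) = it.peek() {
        if c == ket {
            return Ok(v);
        }
        if !first {
            match sep.sep {
                Some(s) if c == s => {
                    it.next();
                    // A separator directly before `ket` is a trailing one.
                    if sep.trailing_sep_allowed && it.peek() == Some(&ket) {
                        continue;
                    }
                }
                Some(s) => return Err(format!("expected `{}`, found `{}`", s, c)),
                None => {}
            }
        }
        first = false;
        match it.next() {
            Some(d) if d.is_ascii_digit() => v.push(d),
            other => return Err(format!("expected element, found {:?}", other)),
        }
    }
    Err("unexpected end of input".to_string())
}

fn main() {
    let comma = |trailing| SeqSep { sep: Some(','), trailing_sep_allowed: trailing };
    assert_eq!(parse_seq("1,2,3)", ')', comma(false)).unwrap(), vec!['1', '2', '3']);
    assert_eq!(parse_seq("1,2,)", ')', comma(true)).unwrap(), vec!['1', '2']);
    assert!(parse_seq("1,2,)", ')', comma(false)).is_err());
}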
pub fn parse_arg_general(&mut self, require_name: bool) -> Arg { let pat = if require_name || self.is_named_argument() { debug!("parse_arg_general parse_pat (require_name:{:?})", @@ -1500,12 +1500,12 @@ impl<'a> Parser<'a> { } } - // parse a single function argument + /// Parse a single function argument pub fn parse_arg(&mut self) -> Arg { self.parse_arg_general(true) } - // parse an argument in a lambda header e.g. |arg, arg| + /// Parse an argument in a lambda header e.g. |arg, arg| pub fn parse_fn_block_arg(&mut self) -> Arg { let pat = self.parse_pat(); let t = if self.eat(&token::COLON) { @@ -1535,7 +1535,7 @@ impl<'a> Parser<'a> { } } - // matches token_lit = LIT_INT | ... + /// Matches token_lit = LIT_INT | ... pub fn lit_from_token(&mut self, tok: &token::Token) -> Lit_ { match *tok { token::LIT_BYTE(i) => LitByte(i), @@ -1562,7 +1562,7 @@ impl<'a> Parser<'a> { } } - // matches lit = true | false | token_lit + /// Matches lit = true | false | token_lit pub fn parse_lit(&mut self) -> Lit { let lo = self.span.lo; let lit = if self.eat_keyword(keywords::True) { @@ -1577,7 +1577,7 @@ impl<'a> Parser<'a> { codemap::Spanned { node: lit, span: mk_sp(lo, self.last_span.hi) } } - // matches '-' lit | lit + /// matches '-' lit | lit pub fn parse_literal_maybe_minus(&mut self) -> Gc { let minus_lo = self.span.lo; let minus_present = self.eat(&token::BINOP(token::MINUS)); @@ -1715,7 +1715,7 @@ impl<'a> Parser<'a> { } /// Parses a single lifetime - // matches lifetime = LIFETIME + /// Matches lifetime = LIFETIME pub fn parse_lifetime(&mut self) -> ast::Lifetime { match self.token { token::LIFETIME(i) => { @@ -1775,7 +1775,7 @@ impl<'a> Parser<'a> { token::is_keyword(keywords::Const, tok) } - // parse mutability declaration (mut/const/imm) + /// Parse mutability declaration (mut/const/imm) pub fn parse_mutability(&mut self) -> Mutability { if self.eat_keyword(keywords::Mut) { MutMutable @@ -1784,7 +1784,7 @@ impl<'a> Parser<'a> { } } - // parse ident COLON expr + /// Parse ident COLON expr pub fn parse_field(&mut self) -> Field { let lo = self.span.lo; let i = self.parse_ident(); @@ -1863,9 +1863,9 @@ impl<'a> Parser<'a> { } } - // at the bottom (top?) of the precedence hierarchy, - // parse things like parenthesized exprs, - // macros, return, etc. + /// At the bottom (top?) of the precedence hierarchy, + /// parse things like parenthesized exprs, + /// macros, return, etc. pub fn parse_bottom_expr(&mut self) -> Gc { maybe_whole_expr!(self); @@ -2084,7 +2084,7 @@ impl<'a> Parser<'a> { return self.mk_expr(lo, hi, ex); } - // parse a block or unsafe block + /// Parse a block or unsafe block pub fn parse_block_expr(&mut self, lo: BytePos, blk_mode: BlockCheckMode) -> Gc { self.expect(&token::LBRACE); @@ -2092,7 +2092,7 @@ impl<'a> Parser<'a> { return self.mk_expr(blk.span.lo, blk.span.hi, ExprBlock(blk)); } - // parse a.b or a(13) or a[4] or just a + /// parse a.b or a(13) or a[4] or just a pub fn parse_dot_or_call_expr(&mut self) -> Gc { let b = self.parse_bottom_expr(); self.parse_dot_or_call_expr_with(b) @@ -2176,8 +2176,8 @@ impl<'a> Parser<'a> { return e; } - // parse an optional separator followed by a kleene-style - // repetition token (+ or *). + /// Parse an optional separator followed by a kleene-style + /// repetition token (+ or *). pub fn parse_sep_and_zerok(&mut self) -> (Option, bool) { fn parse_zerok(parser: &mut Parser) -> Option { match parser.token { @@ -2202,7 +2202,7 @@ impl<'a> Parser<'a> { } } - // parse a single token tree from the input. 
+ /// parse a single token tree from the input. pub fn parse_token_tree(&mut self) -> TokenTree { // FIXME #6994: currently, this is too eager. It // parses token trees but also identifies TTSeq's @@ -2318,9 +2318,9 @@ impl<'a> Parser<'a> { } } - // This goofy function is necessary to correctly match parens in Matcher's. - // Otherwise, `$( ( )` would be a valid Matcher, and `$( () )` would be - // invalid. It's similar to common::parse_seq. + /// This goofy function is necessary to correctly match parens in Matcher's. + /// Otherwise, `$( ( )` would be a valid Matcher, and `$( () )` would be + /// invalid. It's similar to common::parse_seq. pub fn parse_matcher_subseq_upto(&mut self, name_idx: &mut uint, ket: &token::Token) @@ -2369,7 +2369,7 @@ impl<'a> Parser<'a> { return spanned(lo, self.span.hi, m); } - // parse a prefix-operator expr + /// Parse a prefix-operator expr pub fn parse_prefix_expr(&mut self) -> Gc { let lo = self.span.lo; let hi; @@ -2478,13 +2478,13 @@ impl<'a> Parser<'a> { return self.mk_expr(lo, hi, ex); } - // parse an expression of binops + /// Parse an expression of binops pub fn parse_binops(&mut self) -> Gc { let prefix_expr = self.parse_prefix_expr(); self.parse_more_binops(prefix_expr, 0) } - // parse an expression of binops of at least min_prec precedence + /// Parse an expression of binops of at least min_prec precedence pub fn parse_more_binops(&mut self, lhs: Gc, min_prec: uint) -> Gc { if self.expr_is_complete(lhs) { return lhs; } @@ -2532,9 +2532,9 @@ impl<'a> Parser<'a> { } } - // parse an assignment expression.... - // actually, this seems to be the main entry point for - // parsing an arbitrary expression. + /// Parse an assignment expression.... + /// actually, this seems to be the main entry point for + /// parsing an arbitrary expression. pub fn parse_assign_expr(&mut self) -> Gc { let lo = self.span.lo; let lhs = self.parse_binops(); @@ -2568,7 +2568,7 @@ impl<'a> Parser<'a> { } } - // parse an 'if' expression ('if' token already eaten) + /// Parse an 'if' expression ('if' token already eaten) pub fn parse_if_expr(&mut self) -> Gc { let lo = self.last_span.lo; let cond = self.parse_expr_res(RESTRICT_NO_STRUCT_LITERAL); @@ -2583,7 +2583,7 @@ impl<'a> Parser<'a> { self.mk_expr(lo, hi, ExprIf(cond, thn, els)) } - // `|args| { ... }` or `{ ...}` like in `do` expressions + /// `|args| { ... }` or `{ ...}` like in `do` expressions pub fn parse_lambda_block_expr(&mut self) -> Gc { self.parse_lambda_expr_( |p| { @@ -2612,15 +2612,15 @@ impl<'a> Parser<'a> { }) } - // `|args| expr` + /// `|args| expr` pub fn parse_lambda_expr(&mut self) -> Gc { self.parse_lambda_expr_(|p| p.parse_fn_block_decl(), |p| p.parse_expr()) } - // parse something of the form |args| expr - // this is used both in parsing a lambda expr - // and in parsing a block expr as e.g. in for... + /// parse something of the form |args| expr + /// this is used both in parsing a lambda expr + /// and in parsing a block expr as e.g. in for... pub fn parse_lambda_expr_(&mut self, parse_decl: |&mut Parser| -> P, parse_body: |&mut Parser| -> Gc) @@ -2649,7 +2649,7 @@ impl<'a> Parser<'a> { } } - // parse a 'for' .. 'in' expression ('for' token already eaten) + /// Parse a 'for' .. 
'in' expression ('for' token already eaten) pub fn parse_for_expr(&mut self, opt_ident: Option) -> Gc { // Parse: `for in ` @@ -2715,12 +2715,12 @@ impl<'a> Parser<'a> { return self.mk_expr(lo, hi, ExprMatch(discriminant, arms)); } - // parse an expression + /// Parse an expression pub fn parse_expr(&mut self) -> Gc { return self.parse_expr_res(UNRESTRICTED); } - // parse an expression, subject to the given restriction + /// Parse an expression, subject to the given restriction fn parse_expr_res(&mut self, r: restriction) -> Gc { let old = self.restriction; self.restriction = r; @@ -2729,7 +2729,7 @@ impl<'a> Parser<'a> { return e; } - // parse the RHS of a local variable declaration (e.g. '= 14;') + /// Parse the RHS of a local variable declaration (e.g. '= 14;') fn parse_initializer(&mut self) -> Option> { if self.token == token::EQ { self.bump(); @@ -2739,7 +2739,7 @@ impl<'a> Parser<'a> { } } - // parse patterns, separated by '|' s + /// Parse patterns, separated by '|' s fn parse_pats(&mut self) -> Vec> { let mut pats = Vec::new(); loop { @@ -2802,7 +2802,7 @@ impl<'a> Parser<'a> { (before, slice, after) } - // parse the fields of a struct-like pattern + /// Parse the fields of a struct-like pattern fn parse_pat_fields(&mut self) -> (Vec , bool) { let mut fields = Vec::new(); let mut etc = false; @@ -2863,7 +2863,7 @@ impl<'a> Parser<'a> { return (fields, etc); } - // parse a pattern. + /// Parse a pattern. pub fn parse_pat(&mut self) -> Gc { maybe_whole!(self, NtPat); @@ -3099,9 +3099,9 @@ impl<'a> Parser<'a> { } } - // parse ident or ident @ pat - // used by the copy foo and ref foo patterns to give a good - // error message when parsing mistakes like ref foo(a,b) + /// Parse ident or ident @ pat + /// used by the copy foo and ref foo patterns to give a good + /// error message when parsing mistakes like ref foo(a,b) fn parse_pat_ident(&mut self, binding_mode: ast::BindingMode) -> ast::Pat_ { @@ -3134,7 +3134,7 @@ impl<'a> Parser<'a> { PatIdent(binding_mode, name, sub) } - // parse a local variable declaration + /// Parse a local variable declaration fn parse_local(&mut self) -> Gc { let lo = self.span.lo; let pat = self.parse_pat(); @@ -3158,14 +3158,14 @@ impl<'a> Parser<'a> { } } - // parse a "let" stmt + /// Parse a "let" stmt fn parse_let(&mut self) -> Gc { let lo = self.span.lo; let local = self.parse_local(); box(GC) spanned(lo, self.last_span.hi, DeclLocal(local)) } - // parse a structure field + /// Parse a structure field fn parse_name_and_ty(&mut self, pr: Visibility, attrs: Vec ) -> StructField { let lo = self.span.lo; @@ -3183,8 +3183,8 @@ impl<'a> Parser<'a> { }) } - // parse a statement. may include decl. - // precondition: any attributes are parsed already + /// Parse a statement. May include decl. + /// Precondition: any attributes are parsed already pub fn parse_stmt(&mut self, item_attrs: Vec) -> Gc { maybe_whole!(self, NtStmt); @@ -3299,13 +3299,13 @@ impl<'a> Parser<'a> { } } - // is this expression a successfully-parsed statement? + /// Is this expression a successfully-parsed statement? fn expr_is_complete(&mut self, e: Gc) -> bool { return self.restriction == RESTRICT_STMT_EXPR && !classify::expr_requires_semi_to_be_stmt(e); } - // parse a block. No inner attrs are allowed. + /// Parse a block. No inner attrs are allowed. pub fn parse_block(&mut self) -> P { maybe_whole!(no_clone self, NtBlock); @@ -3315,7 +3315,7 @@ impl<'a> Parser<'a> { return self.parse_block_tail_(lo, DefaultBlock, Vec::new()); } - // parse a block. Inner attrs are allowed.
+ /// Parse a block. Inner attrs are allowed. fn parse_inner_attrs_and_block(&mut self) -> (Vec , P) { @@ -3328,15 +3328,15 @@ impl<'a> Parser<'a> { (inner, self.parse_block_tail_(lo, DefaultBlock, next)) } - // Precondition: already parsed the '{' or '#{' - // I guess that also means "already parsed the 'impure'" if - // necessary, and this should take a qualifier. - // some blocks start with "#{"... + /// Precondition: already parsed the '{' or '#{' + /// I guess that also means "already parsed the 'impure'" if + /// necessary, and this should take a qualifier. + /// Some blocks start with "#{"... fn parse_block_tail(&mut self, lo: BytePos, s: BlockCheckMode) -> P { self.parse_block_tail_(lo, s, Vec::new()) } - // parse the rest of a block expression or function body + /// Parse the rest of a block expression or function body fn parse_block_tail_(&mut self, lo: BytePos, s: BlockCheckMode, first_item_attrs: Vec ) -> P { let mut stmts = Vec::new(); @@ -3494,18 +3494,18 @@ impl<'a> Parser<'a> { } } - // matches bounds = ( boundseq )? - // where boundseq = ( bound + boundseq ) | bound - // and bound = 'static | ty - // Returns "None" if there's no colon (e.g. "T"); - // Returns "Some(Empty)" if there's a colon but nothing after (e.g. "T:") - // Returns "Some(stuff)" otherwise (e.g. "T:stuff"). - // NB: The None/Some distinction is important for issue #7264. - // - // Note that the `allow_any_lifetime` argument is a hack for now while the - // AST doesn't support arbitrary lifetimes in bounds on type parameters. In - // the future, this flag should be removed, and the return value of this - // function should be Option<~[TyParamBound]> + /// matches optbounds = ( ( : ( boundseq )? )? ) + /// where boundseq = ( bound + boundseq ) | bound + /// and bound = 'static | ty + /// Returns "None" if there's no colon (e.g. "T"); + /// Returns "Some(Empty)" if there's a colon but nothing after (e.g. "T:") + /// Returns "Some(stuff)" otherwise (e.g. "T:stuff"). + /// NB: The None/Some distinction is important for issue #7264. + /// + /// Note that the `allow_any_lifetime` argument is a hack for now while the + /// AST doesn't support arbitrary lifetimes in bounds on type parameters. In + /// the future, this flag should be removed, and the return value of this + /// function should be Option<~[TyParamBound]> fn parse_ty_param_bounds(&mut self, allow_any_lifetime: bool) -> (Option, OwnedSlice) { @@ -3555,7 +3555,7 @@ impl<'a> Parser<'a> { return (ret_lifetime, OwnedSlice::from_vec(result)); } - // matches typaram = type? IDENT optbounds ( EQ ty )? + /// Matches typaram = type? IDENT optbounds ( EQ ty )? fn parse_ty_param(&mut self) -> TyParam { let sized = self.parse_sized(); let span = self.span; @@ -3587,10 +3587,10 @@ impl<'a> Parser<'a> { } } - // parse a set of optional generic type parameter declarations - // matches generics = ( ) | ( < > ) | ( < typaramseq ( , )? > ) | ( < lifetimes ( , )? > ) - // | ( < lifetimes , typaramseq ( , )? > ) - // where typaramseq = ( typaram ) | ( typaram , typaramseq ) + /// Parse a set of optional generic type parameter declarations + /// matches generics = ( ) | ( < > ) | ( < typaramseq ( , )? > ) | ( < lifetimes ( , )? > ) + /// | ( < lifetimes , typaramseq ( , )? 
> ) + /// where typaramseq = ( typaram ) | ( typaram , typaramseq ) pub fn parse_generics(&mut self) -> ast::Generics { if self.eat(&token::LT) { let lifetimes = self.parse_lifetimes(); @@ -3682,7 +3682,7 @@ impl<'a> Parser<'a> { (args, variadic) } - // parse the argument list and result type of a function declaration + /// Parse the argument list and result type of a function declaration pub fn parse_fn_decl(&mut self, allow_variadic: bool) -> P { let (args, variadic) = self.parse_fn_args(true, allow_variadic); @@ -3712,8 +3712,8 @@ impl<'a> Parser<'a> { self.bump(); } - // parse the argument list and result type of a function - // that may have a self type. + /// Parse the argument list and result type of a function + /// that may have a self type. fn parse_fn_decl_with_self(&mut self, parse_arg_fn: |&mut Parser| -> Arg) -> (ExplicitSelf, P) { fn maybe_parse_borrowed_explicit_self(this: &mut Parser) @@ -3864,7 +3864,7 @@ impl<'a> Parser<'a> { (spanned(lo, hi, explicit_self), fn_decl) } - // parse the |arg, arg| header on a lambda + /// Parse the |arg, arg| header on a lambda fn parse_fn_block_decl(&mut self) -> P { let inputs_captures = { if self.eat(&token::OROR) { @@ -3896,7 +3896,7 @@ impl<'a> Parser<'a> { }) } - // Parses the `(arg, arg) -> return_type` header on a procedure. + /// Parses the `(arg, arg) -> return_type` header on a procedure. fn parse_proc_decl(&mut self) -> P { let inputs = self.parse_unspanned_seq(&token::LPAREN, @@ -3922,7 +3922,7 @@ impl<'a> Parser<'a> { }) } - // parse the name and optional generic types of a function header. + /// Parse the name and optional generic types of a function header. fn parse_fn_header(&mut self) -> (Ident, ast::Generics) { let id = self.parse_ident(); let generics = self.parse_generics(); @@ -3942,7 +3942,7 @@ impl<'a> Parser<'a> { } } - // parse an item-position function declaration. + /// Parse an item-position function declaration. fn parse_item_fn(&mut self, fn_style: FnStyle, abi: abi::Abi) -> ItemInfo { let (ident, generics) = self.parse_fn_header(); let decl = self.parse_fn_decl(false); @@ -3950,7 +3950,7 @@ impl<'a> Parser<'a> { (ident, ItemFn(decl, fn_style, abi, generics, body), Some(inner_attrs)) } - // parse a method in a trait impl, starting with `attrs` attributes. + /// Parse a method in a trait impl, starting with `attrs` attributes. fn parse_method(&mut self, already_parsed_attrs: Option>) -> Gc { let next_attrs = self.parse_outer_attributes(); @@ -3986,7 +3986,7 @@ impl<'a> Parser<'a> { } } - // parse trait Foo { ... } + /// Parse trait Foo { ... } fn parse_item_trait(&mut self) -> ItemInfo { let ident = self.parse_ident(); let tps = self.parse_generics(); @@ -4005,9 +4005,9 @@ impl<'a> Parser<'a> { (ident, ItemTrait(tps, sized, traits, meths), None) } - // Parses two variants (with the region/type params always optional): - // impl Foo { ... } - // impl ToStr for ~[T] { ... } + /// Parses two variants (with the region/type params always optional): + /// impl Foo { ... } + /// impl ToStr for ~[T] { ... } fn parse_item_impl(&mut self) -> ItemInfo { // First, parse type parameters if necessary. 
let generics = self.parse_generics(); @@ -4060,7 +4060,7 @@ impl<'a> Parser<'a> { (ident, ItemImpl(generics, opt_trait, ty, meths), Some(inner_attrs)) } - // parse a::B + /// Parse a::B fn parse_trait_ref(&mut self) -> TraitRef { ast::TraitRef { path: self.parse_path(LifetimeAndTypesWithoutColons).path, @@ -4068,7 +4068,7 @@ impl<'a> Parser<'a> { } } - // parse B + C + D + /// Parse B + C + D fn parse_trait_ref_list(&mut self, ket: &token::Token) -> Vec { self.parse_seq_to_before_end( ket, @@ -4077,7 +4077,7 @@ impl<'a> Parser<'a> { ) } - // parse struct Foo { ... } + /// Parse struct Foo { ... } fn parse_item_struct(&mut self, is_virtual: bool) -> ItemInfo { let class_name = self.parse_ident(); let generics = self.parse_generics(); @@ -4160,7 +4160,7 @@ impl<'a> Parser<'a> { None) } - // parse a structure field declaration + /// Parse a structure field declaration pub fn parse_single_struct_field(&mut self, vis: Visibility, attrs: Vec ) @@ -4182,7 +4182,7 @@ impl<'a> Parser<'a> { a_var } - // parse an element of a struct definition + /// Parse an element of a struct definition fn parse_struct_decl_field(&mut self) -> StructField { let attrs = self.parse_outer_attributes(); @@ -4194,7 +4194,7 @@ impl<'a> Parser<'a> { return self.parse_single_struct_field(Inherited, attrs); } - // parse visiility: PUB, PRIV, or nothing + /// Parse visibility: PUB, PRIV, or nothing fn parse_visibility(&mut self) -> Visibility { if self.eat_keyword(keywords::Pub) { Public } else { Inherited } @@ -4218,8 +4218,8 @@ impl<'a> Parser<'a> { } } - // given a termination token and a vector of already-parsed - // attributes (of length 0 or 1), parse all of the items in a module + /// Given a termination token and a vector of already-parsed + /// attributes (of length 0 or 1), parse all of the items in a module fn parse_mod_items(&mut self, term: token::Token, first_item_attrs: Vec, @@ -4287,7 +4287,7 @@ impl<'a> Parser<'a> { (id, ItemStatic(ty, m, e), None) } - // parse a `mod { ... }` or `mod ;` item + /// Parse a `mod { ... }` or `mod ;` item fn parse_item_mod(&mut self, outer_attrs: &[Attribute]) -> ItemInfo { let id_span = self.span; let id = self.parse_ident(); @@ -4325,7 +4325,7 @@ impl<'a> Parser<'a> { self.mod_path_stack.pop().unwrap(); } - // read a module from a source file. + /// Read a module from a source file. fn eval_src_mod(&mut self, id: ast::Ident, outer_attrs: &[ast::Attribute], @@ -4433,7 +4433,7 @@ impl<'a> Parser<'a> { return (ast::ItemMod(m0), mod_attrs); } - // parse a function declaration from a foreign module + /// Parse a function declaration from a foreign module fn parse_item_foreign_fn(&mut self, vis: ast::Visibility, attrs: Vec) -> Gc { let lo = self.span.lo; @@ -4451,7 +4451,7 @@ impl<'a> Parser<'a> { vis: vis } } - // parse a static item from a foreign module + /// Parse a static item from a foreign module fn parse_item_foreign_static(&mut self, vis: ast::Visibility, attrs: Vec ) -> Gc { let lo = self.span.lo; @@ -4474,7 +4474,7 @@ impl<'a> Parser<'a> { } } - // parse safe/unsafe and fn + /// Parse safe/unsafe and fn fn parse_fn_style(&mut self) -> FnStyle { if self.eat_keyword(keywords::Fn) { NormalFn } else if self.eat_keyword(keywords::Unsafe) { @@ -4485,8 +4485,8 @@ impl<'a> Parser<'a> { }
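As an editorial aside, the item forms these routines accept look like this in 2014-era surface syntax (all names invented for illustration):

trait Greet { fn greet(&self) -> String; }
pub struct Foo { pub a: int, b: int }      // parse_visibility: pub, or nothing
impl Foo {                                 // first impl form: impl Foo { ... }
    fn helper(&self) -> int { self.a }
}
impl Greet for Foo {                       // second impl form: impl Trait for Type { ... }
    fn greet(&self) -> String { "hi".to_string() }
}
fn bounded<T: Greet + Clone>(t: T) -> T { t.clone() }  // parse_trait_ref_list: B + C + D

- // at this point, this is essentially a wrapper for - // parse_foreign_items. + /// At this point, this is essentially a wrapper for + /// parse_foreign_items.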
fn parse_foreign_mod_items(&mut self, abi: abi::Abi, first_item_attrs: Vec ) @@ -4587,7 +4587,7 @@ impl<'a> Parser<'a> { return IoviItem(item); } - // parse type Foo = Bar; + /// Parse type Foo = Bar; fn parse_item_type(&mut self) -> ItemInfo { let ident = self.parse_ident(); let tps = self.parse_generics(); @@ -4597,8 +4597,8 @@ impl<'a> Parser<'a> { (ident, ItemTy(ty, tps), None) } - // parse a structure-like enum variant definition - // this should probably be renamed or refactored... + /// Parse a structure-like enum variant definition. + /// This should probably be renamed or refactored... fn parse_struct_def(&mut self) -> Gc { let mut fields: Vec = Vec::new(); while self.token != token::RBRACE { @@ -4614,7 +4614,7 @@ impl<'a> Parser<'a> { }; } - // parse the part of an "enum" decl following the '{' + /// Parse the part of an "enum" decl following the '{' fn parse_enum_def(&mut self, _generics: &ast::Generics) -> EnumDef { let mut variants = Vec::new(); let mut all_nullary = true; @@ -4678,7 +4678,7 @@ impl<'a> Parser<'a> { ast::EnumDef { variants: variants } } - // parse an "enum" declaration + /// Parse an "enum" declaration fn parse_item_enum(&mut self) -> ItemInfo { let id = self.parse_ident(); let generics = self.parse_generics(); @@ -4695,8 +4695,8 @@ impl<'a> Parser<'a> { } } - // Parses a string as an ABI spec on an extern type or module. Consumes - // the `extern` keyword, if one is found. + /// Parses a string as an ABI spec on an extern type or module. Consumes + /// the `extern` keyword, if one is found. fn parse_opt_abi(&mut self) -> Option { match self.token { token::LIT_STR(s) | token::LIT_STR_RAW(s, _) => { @@ -4722,10 +4722,10 @@ impl<'a> Parser<'a> { } } - // parse one of the items or view items allowed by the - // flags; on failure, return IoviNone. - // NB: this function no longer parses the items inside an - // extern crate. + /// Parse one of the items or view items allowed by the + /// flags; on failure, return IoviNone. + /// NB: this function no longer parses the items inside an + /// extern crate. fn parse_item_or_view_item(&mut self, attrs: Vec , macros_allowed: bool) @@ -4933,7 +4933,7 @@ impl<'a> Parser<'a> { self.parse_macro_use_or_failure(attrs,macros_allowed,lo,visibility) } - // parse a foreign item; on failure, return IoviNone. + /// Parse a foreign item; on failure, return IoviNone. fn parse_foreign_item(&mut self, attrs: Vec , macros_allowed: bool) @@ -4956,7 +4956,7 @@ impl<'a> Parser<'a> { self.parse_macro_use_or_failure(attrs,macros_allowed,lo,visibility) } - // this is the fall-through for parsing items. + /// This is the fall-through for parsing items. fn parse_macro_use_or_failure( &mut self, attrs: Vec , @@ -5040,17 +5040,17 @@ impl<'a> Parser<'a> { } } - // parse, e.g., "use a::b::{z,y}" + /// Parse, e.g., "use a::b::{z,y}" fn parse_use(&mut self) -> ViewItem_ { return ViewItemUse(self.parse_view_path()); }
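The view-path grammar reproduced below admits, for example, these 2014-era use declarations (paths chosen purely for illustration; `IDENT EQ path` is the old renaming form that predates `use path as ident`):

use std::mem;                   // MOD? non_global_path
use alias = std::mem;           // MOD? IDENT EQ non_global_path
use std::mem::{swap, replace};  // non_global_path MOD_SEP LBRACE ident_seq RBRACE
use std::mem::*;                // non_global_path MOD_SEP STAR

- // matches view_path : MOD? IDENT EQ non_global_path - // | MOD? non_global_path MOD_SEP LBRACE RBRACE - // | MOD? non_global_path MOD_SEP LBRACE ident_seq RBRACE - // | MOD? non_global_path MOD_SEP STAR - // | MOD? non_global_path + /// Matches view_path : MOD? IDENT EQ non_global_path + /// | MOD? non_global_path MOD_SEP LBRACE RBRACE + /// | MOD? non_global_path MOD_SEP LBRACE ident_seq RBRACE + /// | MOD? non_global_path MOD_SEP STAR + /// | MOD?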
non_global_path fn parse_view_path(&mut self) -> Gc { let lo = self.span.lo; @@ -5173,10 +5173,10 @@ impl<'a> Parser<'a> { ViewPathSimple(last, path, ast::DUMMY_NODE_ID)); } - // Parses a sequence of items. Stops when it finds program - // text that can't be parsed as an item - // - mod_items uses extern_mod_allowed = true - // - block_tail_ uses extern_mod_allowed = false + /// Parses a sequence of items. Stops when it finds program + /// text that can't be parsed as an item + /// - mod_items uses extern_mod_allowed = true + /// - block_tail_ uses extern_mod_allowed = false fn parse_items_and_view_items(&mut self, first_item_attrs: Vec , mut extern_mod_allowed: bool, @@ -5258,8 +5258,8 @@ impl<'a> Parser<'a> { } } - // Parses a sequence of foreign items. Stops when it finds program - // text that can't be parsed as an item + /// Parses a sequence of foreign items. Stops when it finds program + /// text that can't be parsed as an item fn parse_foreign_items(&mut self, first_item_attrs: Vec , macros_allowed: bool) -> ParsedItemsAndViewItems { @@ -5298,8 +5298,8 @@ impl<'a> Parser<'a> { } } - // Parses a source module as a crate. This is the main - // entry point for the parser. + /// Parses a source module as a crate. This is the main + /// entry point for the parser. pub fn parse_crate_mod(&mut self) -> Crate { let lo = self.span.lo; // parse the crate's inner attrs, maybe (oops) one diff --git a/src/libsyntax/parse/token.rs b/src/libsyntax/parse/token.rs index 8e36339b0e5e3..46dcd7ebbb3e4 100644 --- a/src/libsyntax/parse/token.rs +++ b/src/libsyntax/parse/token.rs @@ -83,25 +83,25 @@ pub enum Token { LIT_INT(i64, ast::IntTy), LIT_UINT(u64, ast::UintTy), LIT_INT_UNSUFFIXED(i64), - LIT_FLOAT(ast::Ident, ast::FloatTy), - LIT_FLOAT_UNSUFFIXED(ast::Ident), - LIT_STR(ast::Ident), - LIT_STR_RAW(ast::Ident, uint), /* raw str delimited by n hash symbols */ + LIT_FLOAT(Ident, ast::FloatTy), + LIT_FLOAT_UNSUFFIXED(Ident), + LIT_STR(Ident), + LIT_STR_RAW(Ident, uint), /* raw str delimited by n hash symbols */ LIT_BINARY(Rc>), LIT_BINARY_RAW(Rc>, uint), /* raw binary str delimited by n hash symbols */ /* Name components */ - // an identifier contains an "is_mod_name" boolean, - // indicating whether :: follows this token with no - // whitespace in between. - IDENT(ast::Ident, bool), + /// An identifier contains an "is_mod_name" boolean, + /// indicating whether :: follows this token with no + /// whitespace in between. + IDENT(Ident, bool), UNDERSCORE, - LIFETIME(ast::Ident), + LIFETIME(Ident), /* For interpolation */ INTERPOLATED(Nonterminal), - DOC_COMMENT(ast::Ident), + DOC_COMMENT(Ident), EOF, } @@ -114,7 +114,7 @@ pub enum Nonterminal { NtPat( Gc), NtExpr(Gc), NtTy( P), - NtIdent(Box, bool), + NtIdent(Box, bool), NtMeta(Gc), // stuff inside brackets for attributes NtPath(Box), NtTT( Gc), // needs @ed to break a circularity @@ -681,20 +681,20 @@ pub fn gensym(s: &str) -> Name { /// Maps a string to an identifier with an empty syntax context. #[inline] -pub fn str_to_ident(s: &str) -> ast::Ident { - ast::Ident::new(intern(s)) +pub fn str_to_ident(s: &str) -> Ident { + Ident::new(intern(s)) } /// Maps a string to a gensym'ed identifier. #[inline] -pub fn gensym_ident(s: &str) -> ast::Ident { - ast::Ident::new(gensym(s)) +pub fn gensym_ident(s: &str) -> Ident { + Ident::new(gensym(s)) } // create a fresh name that maps to the same string as the old one. 
// note that this guarantees that str_ptr_eq(ident_to_str(src),interner_get(fresh_name(src))); // that is, that the new name and the old one are connected to ptr_eq strings. -pub fn fresh_name(src: &ast::Ident) -> Name { +pub fn fresh_name(src: &Ident) -> Name { let interner = get_ident_interner(); interner.gensym_copy(src.name) // following: debug version. Could work in final except that it's incompatible with @@ -765,8 +765,8 @@ mod test { use ast; use ext::mtwt; - fn mark_ident(id : ast::Ident, m : ast::Mrk) -> ast::Ident { - ast::Ident{name:id.name,ctxt:mtwt::new_mark(m,id.ctxt)} + fn mark_ident(id : Ident, m : ast::Mrk) -> Ident { + Ident{name:id.name,ctxt:mtwt::new_mark(m,id.ctxt)} } #[test] fn mtwt_token_eq_test() { diff --git a/src/libsyntax/print/pp.rs b/src/libsyntax/print/pp.rs index 672e08af2d8ff..814c80eb472e3 100644 --- a/src/libsyntax/print/pp.rs +++ b/src/libsyntax/print/pp.rs @@ -8,58 +8,56 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/* - * This pretty-printer is a direct reimplementation of Philip Karlton's - * Mesa pretty-printer, as described in appendix A of - * - * STAN-CS-79-770: "Pretty Printing", by Derek C. Oppen. - * Stanford Department of Computer Science, 1979. - * - * The algorithm's aim is to break a stream into as few lines as possible - * while respecting the indentation-consistency requirements of the enclosing - * block, and avoiding breaking at silly places on block boundaries, for - * example, between "x" and ")" in "x)". - * - * I am implementing this algorithm because it comes with 20 pages of - * documentation explaining its theory, and because it addresses the set of - * concerns I've seen other pretty-printers fall down on. Weirdly. Even though - * it's 32 years old. What can I say? - * - * Despite some redundancies and quirks in the way it's implemented in that - * paper, I've opted to keep the implementation here as similar as I can, - * changing only what was blatantly wrong, a typo, or sufficiently - * non-idiomatic rust that it really stuck out. - * - * In particular you'll see a certain amount of churn related to INTEGER vs. - * CARDINAL in the Mesa implementation. Mesa apparently interconverts the two - * somewhat readily? In any case, I've used uint for indices-in-buffers and - * ints for character-sizes-and-indentation-offsets. This respects the need - * for ints to "go negative" while carrying a pending-calculation balance, and - * helps differentiate all the numbers flying around internally (slightly). - * - * I also inverted the indentation arithmetic used in the print stack, since - * the Mesa implementation (somewhat randomly) stores the offset on the print - * stack in terms of margin-col rather than col itself. I store col. - * - * I also implemented a small change in the String token, in that I store an - * explicit length for the string. For most tokens this is just the length of - * the accompanying string. But it's necessary to permit it to differ, for - * encoding things that are supposed to "go on their own line" -- certain - * classes of comment and blank-line -- where relying on adjacent - * hardbreak-like Break tokens with long blankness indication doesn't actually - * work. To see why, consider when there is a "thing that should be on its own - * line" between two long blocks, say functions. 
If you put a hardbreak after - * each function (or before each) and the breaking algorithm decides to break - * there anyways (because the functions themselves are long) you wind up with - * extra blank lines. If you don't put hardbreaks you can wind up with the - * "thing which should be on its own line" not getting its own line in the - * rare case of "really small functions" or such. This re-occurs with comments - * and explicit blank lines. So in those cases we use a string with a payload - * we want isolated to a line and an explicit length that's huge, surrounded - * by two zero-length breaks. The algorithm will try its best to fit it on a - * line (which it can't) and so naturally place the content on its own line to - * avoid combining it with other lines and making matters even worse. - */ +//! This pretty-printer is a direct reimplementation of Philip Karlton's +//! Mesa pretty-printer, as described in appendix A of +//! +//! STAN-CS-79-770: "Pretty Printing", by Derek C. Oppen. +//! Stanford Department of Computer Science, 1979. +//! +//! The algorithm's aim is to break a stream into as few lines as possible +//! while respecting the indentation-consistency requirements of the enclosing +//! block, and avoiding breaking at silly places on block boundaries, for +//! example, between "x" and ")" in "x)". +//! +//! I am implementing this algorithm because it comes with 20 pages of +//! documentation explaining its theory, and because it addresses the set of +//! concerns I've seen other pretty-printers fall down on. Weirdly. Even though +//! it's 32 years old. What can I say? +//! +//! Despite some redundancies and quirks in the way it's implemented in that +//! paper, I've opted to keep the implementation here as similar as I can, +//! changing only what was blatantly wrong, a typo, or sufficiently +//! non-idiomatic rust that it really stuck out. +//! +//! In particular you'll see a certain amount of churn related to INTEGER vs. +//! CARDINAL in the Mesa implementation. Mesa apparently interconverts the two +//! somewhat readily? In any case, I've used uint for indices-in-buffers and +//! ints for character-sizes-and-indentation-offsets. This respects the need +//! for ints to "go negative" while carrying a pending-calculation balance, and +//! helps differentiate all the numbers flying around internally (slightly). +//! +//! I also inverted the indentation arithmetic used in the print stack, since +//! the Mesa implementation (somewhat randomly) stores the offset on the print +//! stack in terms of margin-col rather than col itself. I store col. +//! +//! I also implemented a small change in the String token, in that I store an +//! explicit length for the string. For most tokens this is just the length of +//! the accompanying string. But it's necessary to permit it to differ, for +//! encoding things that are supposed to "go on their own line" -- certain +//! classes of comment and blank-line -- where relying on adjacent +//! hardbreak-like Break tokens with long blankness indication doesn't actually +//! work. To see why, consider when there is a "thing that should be on its own +//! line" between two long blocks, say functions. If you put a hardbreak after +//! each function (or before each) and the breaking algorithm decides to break +//! there anyways (because the functions themselves are long) you wind up with +//! extra blank lines. If you don't put hardbreaks you can wind up with the +//! "thing which should be on its own line" not getting its own line in the +//! 
rare case of "really small functions" or such. This re-occurs with comments +//! and explicit blank lines. So in those cases we use a string with a payload +//! we want isolated to a line and an explicit length that's huge, surrounded +//! by two zero-length breaks. The algorithm will try its best to fit it on a +//! line (which it can't) and so naturally place the content on its own line to +//! avoid combining it with other lines and making matters even worse. use std::io; use std::string::String; @@ -186,107 +184,116 @@ pub fn mk_printer(out: Box, linewidth: uint) -> Printer { } -/* - * In case you do not have the paper, here is an explanation of what's going - * on. - * - * There is a stream of input tokens flowing through this printer. - * - * The printer buffers up to 3N tokens inside itself, where N is linewidth. - * Yes, linewidth is chars and tokens are multi-char, but in the worst - * case every token worth buffering is 1 char long, so it's ok. - * - * Tokens are String, Break, and Begin/End to delimit blocks. - * - * Begin tokens can carry an offset, saying "how far to indent when you break - * inside here", as well as a flag indicating "consistent" or "inconsistent" - * breaking. Consistent breaking means that after the first break, no attempt - * will be made to flow subsequent breaks together onto lines. Inconsistent - * is the opposite. Inconsistent breaking example would be, say: - * - * foo(hello, there, good, friends) - * - * breaking inconsistently to become - * - * foo(hello, there - * good, friends); - * - * whereas a consistent breaking would yield: - * - * foo(hello, - * there - * good, - * friends); - * - * That is, in the consistent-break blocks we value vertical alignment - * more than the ability to cram stuff onto a line. But in all cases if it - * can make a block a one-liner, it'll do so. - * - * Carrying on with high-level logic: - * - * The buffered tokens go through a ring-buffer, 'tokens'. The 'left' and - * 'right' indices denote the active portion of the ring buffer as well as - * describing hypothetical points-in-the-infinite-stream at most 3N tokens - * apart (i.e. "not wrapped to ring-buffer boundaries"). The paper will switch - * between using 'left' and 'right' terms to denote the wrapepd-to-ring-buffer - * and point-in-infinite-stream senses freely. - * - * There is a parallel ring buffer, 'size', that holds the calculated size of - * each token. Why calculated? Because for Begin/End pairs, the "size" - * includes everything between the pair. That is, the "size" of Begin is - * actually the sum of the sizes of everything between Begin and the paired - * End that follows. Since that is arbitrarily far in the future, 'size' is - * being rewritten regularly while the printer runs; in fact most of the - * machinery is here to work out 'size' entries on the fly (and give up when - * they're so obviously over-long that "infinity" is a good enough - * approximation for purposes of line breaking). - * - * The "input side" of the printer is managed as an abstract process called - * SCAN, which uses 'scan_stack', 'scan_stack_empty', 'top' and 'bottom', to - * manage calculating 'size'. SCAN is, in other words, the process of - * calculating 'size' entries. - * - * The "output side" of the printer is managed by an abstract process called - * PRINT, which uses 'print_stack', 'margin' and 'space' to figure out what to - * do with each token/size pair it consumes as it goes. 
It's trying to consume - * the entire buffered window, but can't output anything until the size is >= - * 0 (sizes are set to negative while they're pending calculation). - * - * So SCAN takes input and buffers tokens and pending calculations, while - * PRINT gobbles up completed calculations and tokens from the buffer. The - * theory is that the two can never get more than 3N tokens apart, because - * once there's "obviously" too much data to fit on a line, in a size - * calculation, SCAN will write "infinity" to the size and let PRINT consume - * it. - * - * In this implementation (following the paper, again) the SCAN process is - * the method called 'pretty_print', and the 'PRINT' process is the method - * called 'print'. - */ +/// In case you do not have the paper, here is an explanation of what's going +/// on. +/// +/// There is a stream of input tokens flowing through this printer. +/// +/// The printer buffers up to 3N tokens inside itself, where N is linewidth. +/// Yes, linewidth is chars and tokens are multi-char, but in the worst +/// case every token worth buffering is 1 char long, so it's ok. +/// +/// Tokens are String, Break, and Begin/End to delimit blocks. +/// +/// Begin tokens can carry an offset, saying "how far to indent when you break +/// inside here", as well as a flag indicating "consistent" or "inconsistent" +/// breaking. Consistent breaking means that after the first break, no attempt +/// will be made to flow subsequent breaks together onto lines. Inconsistent +/// is the opposite. Inconsistent breaking example would be, say: +/// +/// foo(hello, there, good, friends) +/// +/// breaking inconsistently to become +/// +/// foo(hello, there +/// good, friends); +/// +/// whereas a consistent breaking would yield: +/// +/// foo(hello, +/// there +/// good, +/// friends); +/// +/// That is, in the consistent-break blocks we value vertical alignment +/// more than the ability to cram stuff onto a line. But in all cases if it +/// can make a block a one-liner, it'll do so. +/// +/// Carrying on with high-level logic: +/// +/// The buffered tokens go through a ring-buffer, 'tokens'. The 'left' and +/// 'right' indices denote the active portion of the ring buffer as well as +/// describing hypothetical points-in-the-infinite-stream at most 3N tokens +/// apart (i.e. "not wrapped to ring-buffer boundaries"). The paper will switch +/// between using 'left' and 'right' terms to denote the wrapped-to-ring-buffer +/// and point-in-infinite-stream senses freely. +/// +/// There is a parallel ring buffer, 'size', that holds the calculated size of +/// each token. Why calculated? Because for Begin/End pairs, the "size" +/// includes everything between the pair. That is, the "size" of Begin is +/// actually the sum of the sizes of everything between Begin and the paired +/// End that follows. Since that is arbitrarily far in the future, 'size' is +/// being rewritten regularly while the printer runs; in fact most of the +/// machinery is here to work out 'size' entries on the fly (and give up when +/// they're so obviously over-long that "infinity" is a good enough +/// approximation for purposes of line breaking). +/// +/// The "input side" of the printer is managed as an abstract process called +/// SCAN, which uses 'scan_stack', 'scan_stack_empty', 'top' and 'bottom', to +/// manage calculating 'size'. SCAN is, in other words, the process of +/// calculating 'size' entries.
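An editorial aside before the PRINT half of this explanation: the token vocabulary just described can be sketched as a small Rust enum. The names and payloads below are approximations for exposition, not this module's actual definitions:

// Illustrative sketch of the token kinds SCAN buffers and PRINT consumes.
enum SketchToken {
    Begin(int, bool),  // indent offset, plus a consistent-vs-inconsistent flag
    Break(int, int),   // blank width if unbroken, indent offset if broken
    Str(String, int),  // text plus an explicit size; the size may be inflated
                       // past the real length to force text onto its own line
    End,               // closes the innermost Begin
}

The 3N bound mentioned above is what makes fixed-size ring buffers safe here: with N the line width, every pending size calculation resolves (possibly to "infinity") before SCAN can run more than 3N tokens ahead of PRINT.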
+/// +/// The "output side" of the printer is managed by an abstract process called +/// PRINT, which uses 'print_stack', 'margin' and 'space' to figure out what to +/// do with each token/size pair it consumes as it goes. It's trying to consume +/// the entire buffered window, but can't output anything until the size is >= +/// 0 (sizes are set to negative while they're pending calculation). +/// +/// So SCAN takes input and buffers tokens and pending calculations, while +/// PRINT gobbles up completed calculations and tokens from the buffer. The +/// theory is that the two can never get more than 3N tokens apart, because +/// once there's "obviously" too much data to fit on a line, in a size +/// calculation, SCAN will write "infinity" to the size and let PRINT consume +/// it. +/// +/// In this implementation (following the paper, again) the SCAN process is +/// the method called 'pretty_print', and the 'PRINT' process is the method +/// called 'print'. pub struct Printer { pub out: Box, buf_len: uint, - margin: int, // width of lines we're constrained to - space: int, // number of spaces left on line - left: uint, // index of left side of input stream - right: uint, // index of right side of input stream - token: Vec , // ring-buffr stream goes through - size: Vec , // ring-buffer of calculated sizes - left_total: int, // running size of stream "...left" - right_total: int, // running size of stream "...right" - // pseudo-stack, really a ring too. Holds the - // primary-ring-buffers index of the Begin that started the - // current block, possibly with the most recent Break after that - // Begin (if there is any) on top of it. Stuff is flushed off the - // bottom as it becomes irrelevant due to the primary ring-buffer - // advancing. + /// Width of lines we're constrained to + margin: int, + /// Number of spaces left on line + space: int, + /// Index of left side of input stream + left: uint, + /// Index of right side of input stream + right: uint, + /// Ring-buffr stream goes through + token: Vec , + /// Ring-buffer of calculated sizes + size: Vec , + /// Running size of stream "...left" + left_total: int, + /// Running size of stream "...right" + right_total: int, + /// Pseudo-stack, really a ring too. Holds the + /// primary-ring-buffers index of the Begin that started the + /// current block, possibly with the most recent Break after that + /// Begin (if there is any) on top of it. Stuff is flushed off the + /// bottom as it becomes irrelevant due to the primary ring-buffer + /// advancing. 
scan_stack: Vec , - scan_stack_empty: bool, // top==bottom disambiguator - top: uint, // index of top of scan_stack - bottom: uint, // index of bottom of scan_stack - // stack of blocks-in-progress being flushed by print + /// Top==bottom disambiguator + scan_stack_empty: bool, + /// Index of top of scan_stack + top: uint, + /// Index of bottom of scan_stack + bottom: uint, + /// Stack of blocks-in-progress being flushed by print print_stack: Vec , - // buffered indentation to avoid writing trailing whitespace + /// Buffered indentation to avoid writing trailing whitespace pending_indentation: int, } diff --git a/src/libsyntax/print/pprust.rs b/src/libsyntax/print/pprust.rs index fafebd3c5dc3e..f760519b39c63 100644 --- a/src/libsyntax/print/pprust.rs +++ b/src/libsyntax/print/pprust.rs @@ -88,9 +88,9 @@ pub static indent_unit: uint = 4u; pub static default_columns: uint = 78u; -// Requires you to pass an input filename and reader so that -// it can scan the input text for comments and literals to -// copy forward. +/// Requires you to pass an input filename and reader so that +/// it can scan the input text for comments and literals to +/// copy forward. pub fn print_crate<'a>(cm: &'a CodeMap, span_diagnostic: &diagnostic::SpanHandler, krate: &ast::Crate, diff --git a/src/libsyntax/util/interner.rs b/src/libsyntax/util/interner.rs index 4d88aaca7486b..55fff38f99131 100644 --- a/src/libsyntax/util/interner.rs +++ b/src/libsyntax/util/interner.rs @@ -8,9 +8,9 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -// An "interner" is a data structure that associates values with uint tags and -// allows bidirectional lookup; i.e. given a value, one can easily find the -// type, and vice versa. +//! An "interner" is a data structure that associates values with uint tags and +//! allows bidirectional lookup; i.e. given a value, one can easily find the +//! type, and vice versa. use ast::Name; diff --git a/src/libsyntax/util/parser_testing.rs b/src/libsyntax/util/parser_testing.rs index 04116dec60e31..f50739a7069e0 100644 --- a/src/libsyntax/util/parser_testing.rs +++ b/src/libsyntax/util/parser_testing.rs @@ -17,14 +17,14 @@ use parse::token; use std::gc::Gc; -// map a string to tts, using a made-up filename: +/// Map a string to tts, using a made-up filename: pub fn string_to_tts(source_str: String) -> Vec { let ps = new_parse_sess(); filemap_to_tts(&ps, string_to_filemap(&ps, source_str, "bogofile".to_string())) } -// map string to parser (via tts) +/// Map string to parser (via tts) pub fn string_to_parser<'a>(ps: &'a ParseSess, source_str: String) -> Parser<'a> { new_parser_from_source_str(ps, Vec::new(), @@ -40,51 +40,51 @@ fn with_error_checking_parse(s: String, f: |&mut Parser| -> T) -> T { x } -// parse a string, return a crate. +/// Parse a string, return a crate. 
pub fn string_to_crate (source_str : String) -> ast::Crate { with_error_checking_parse(source_str, |p| { p.parse_crate_mod() }) } -// parse a string, return an expr +/// Parse a string, return an expr pub fn string_to_expr (source_str : String) -> Gc { with_error_checking_parse(source_str, |p| { p.parse_expr() }) } -// parse a string, return an item +/// Parse a string, return an item pub fn string_to_item (source_str : String) -> Option> { with_error_checking_parse(source_str, |p| { p.parse_item(Vec::new()) }) } -// parse a string, return a stmt +/// Parse a string, return a stmt pub fn string_to_stmt(source_str : String) -> Gc { with_error_checking_parse(source_str, |p| { p.parse_stmt(Vec::new()) }) } -// parse a string, return a pat. Uses "irrefutable"... which doesn't -// (currently) affect parsing. +/// Parse a string, return a pat. Uses "irrefutable"... which doesn't +/// (currently) affect parsing. pub fn string_to_pat(source_str: String) -> Gc { string_to_parser(&new_parse_sess(), source_str).parse_pat() } -// convert a vector of strings to a vector of ast::Ident's +/// Convert a vector of strings to a vector of ast::Ident's pub fn strs_to_idents(ids: Vec<&str> ) -> Vec { ids.iter().map(|u| token::str_to_ident(*u)).collect() } -// does the given string match the pattern? whitespace in the first string -// may be deleted or replaced with other whitespace to match the pattern. -// this function is unicode-ignorant; fortunately, the careful design of -// UTF-8 mitigates this ignorance. In particular, this function only collapses -// sequences of \n, \r, ' ', and \t, but it should otherwise tolerate unicode -// chars. Unsurprisingly, it doesn't do NKF-normalization(?). +/// Does the given string match the pattern? Whitespace in the first string +/// may be deleted or replaced with other whitespace to match the pattern. +/// This function is unicode-ignorant; fortunately, the careful design of +/// UTF-8 mitigates this ignorance. In particular, this function only collapses +/// sequences of \n, \r, ' ', and \t, but it should otherwise tolerate unicode +/// chars. Unsurprisingly, it doesn't do NKF-normalization(?). pub fn matches_codepattern(a : &str, b : &str) -> bool { let mut idx_a = 0; let mut idx_b = 0; @@ -122,9 +122,9 @@ pub fn matches_codepattern(a : &str, b : &str) -> bool { } } -// given a string and an index, return the first uint >= idx -// that is a non-ws-char or is outside of the legal range of -// the string. +/// Given a string and an index, return the first uint >= idx +/// that is a non-ws-char or is outside of the legal range of +/// the string. fn scan_for_non_ws_or_end(a : &str, idx: uint) -> uint { let mut i = idx; let len = a.len(); @@ -134,7 +134,7 @@ fn scan_for_non_ws_or_end(a : &str, idx: uint) -> uint { i } -// copied from lexer. +/// Copied from lexer. pub fn is_whitespace(c: char) -> bool { return c == ' ' || c == '\t' || c == '\r' || c == '\n'; }
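A usage sketch for matches_codepattern (editorial, not part of the patch): only the first argument's whitespace is flexible, so under the description above calls like these should hold:

assert!(matches_codepattern("fn main ( ) { }", "fn main() { }"));  // whitespace deleted
assert!(matches_codepattern("fn\tmain()\n{ }", "fn main() { }"));  // whitespace replaced

diff --git a/src/libsyntax/visit.rs b/src/libsyntax/visit.rs index 6f0fc217533fc..add6a3c9f42ea 100644 --- a/src/libsyntax/visit.rs +++ b/src/libsyntax/visit.rs @@ -8,6 +8,18 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +//! Context-passing AST walker. Each overridden visit method has full control +//! over what happens with its node, it can do its own traversal of the node's +//! children (potentially passing in different contexts to each), call +//! visit::visit_* to apply the default traversal algorithm (again, it can +//!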
override the context), or prevent deeper traversal by doing nothing. +//! +//! Note: it is an important invariant that the default visitor walks the body +//! of a function in "execution order" (more concretely, reverse post-order +//! with respect to the CFG implied by the AST), meaning that if AST node A may +//! execute before AST node B, then A is visited first. The borrow checker in +//! particular relies on this property. +//! use abi::Abi; use ast::*; use ast; @@ -17,27 +29,15 @@ use owned_slice::OwnedSlice; use std::gc::Gc; -// Context-passing AST walker. Each overridden visit method has full control -// over what happens with its node, it can do its own traversal of the node's -// children (potentially passing in different contexts to each), call -// visit::visit_* to apply the default traversal algorithm (again, it can -// override the context), or prevent deeper traversal by doing nothing. -// -// Note: it is an important invariant that the default visitor walks the body -// of a function in "execution order" (more concretely, reverse post-order -// with respect to the CFG implied by the AST), meaning that if AST node A may -// execute before AST node B, then A is visited first. The borrow checker in -// particular relies on this property. - pub enum FnKind<'a> { - // fn foo() or extern "Abi" fn foo() + /// fn foo() or extern "Abi" fn foo() FkItemFn(Ident, &'a Generics, FnStyle, Abi), - // fn foo(&self) + /// fn foo(&self) FkMethod(Ident, &'a Generics, &'a Method), - // |x, y| ... - // proc(x, y) ... + /// |x, y| ... + /// proc(x, y) ... FkFnBlock, } diff --git a/src/test/compile-fail/lex-illegal-num-char-escape.rs b/src/test/compile-fail/lex-bad-char-literals.rs similarity index 75% rename from src/test/compile-fail/lex-illegal-num-char-escape.rs rename to src/test/compile-fail/lex-bad-char-literals.rs index 8f4c756c891d5..9179a3633752e 100644 --- a/src/test/compile-fail/lex-illegal-num-char-escape.rs +++ b/src/test/compile-fail/lex-bad-char-literals.rs @@ -31,5 +31,19 @@ static s: &'static str = static s2: &'static str = "\u23q" //~ ERROR: illegal character in numeric character escape + //~^ ERROR: numeric character escape is too short +; + +static c: char = + '\●' //~ ERROR: unknown character escape +; + +static s: &'static str = + "\●" //~ ERROR: unknown string escape +; + +// THIS MUST BE LAST, since unterminated character constants kill the lexer + +static c: char = + '● //~ ERROR: unterminated character constant ; -//~^^ ERROR: numeric character escape is too short diff --git a/src/test/compile-fail/lex-bad-fp-base-2.rs b/src/test/compile-fail/lex-bad-fp-base-2.rs deleted file mode 100644 index b1d45f78e4a5b..0000000000000 --- a/src/test/compile-fail/lex-bad-fp-base-2.rs +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -fn main() { - let b = 0o2f32; //~ ERROR: octal float literal is not supported -} diff --git a/src/test/compile-fail/lex-bad-fp-base-3.rs b/src/test/compile-fail/lex-bad-fp-base-3.rs deleted file mode 100644 index 79c42360adb2f..0000000000000 --- a/src/test/compile-fail/lex-bad-fp-base-3.rs +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2014 The Rust Project Developers. 
See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -fn main() { - let c = 0o3.0f32; //~ ERROR: octal float literal is not supported -} diff --git a/src/test/compile-fail/lex-bad-fp-base-4.rs b/src/test/compile-fail/lex-bad-fp-base-4.rs deleted file mode 100644 index eaea61b0089af..0000000000000 --- a/src/test/compile-fail/lex-bad-fp-base-4.rs +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -fn main() { - let d = 0o4e4; //~ ERROR: octal float literal is not supported -} diff --git a/src/test/compile-fail/lex-bad-fp-base-5.rs b/src/test/compile-fail/lex-bad-fp-base-5.rs deleted file mode 100644 index ee25ed95639e2..0000000000000 --- a/src/test/compile-fail/lex-bad-fp-base-5.rs +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -fn main() { - let e = 0o5.0e5; //~ ERROR: octal float literal is not supported -} diff --git a/src/test/compile-fail/lex-bad-fp-base-6.rs b/src/test/compile-fail/lex-bad-fp-base-6.rs deleted file mode 100644 index bf08ec1eae5fe..0000000000000 --- a/src/test/compile-fail/lex-bad-fp-base-6.rs +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -fn main() { - let f = 0o6e6f32; //~ ERROR: octal float literal is not supported -} diff --git a/src/test/compile-fail/lex-bad-fp-base-7.rs b/src/test/compile-fail/lex-bad-fp-base-7.rs deleted file mode 100644 index 921ed8f1b69e8..0000000000000 --- a/src/test/compile-fail/lex-bad-fp-base-7.rs +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -fn main() { - let g = 0o7.0e7f64; //~ ERROR: octal float literal is not supported -} diff --git a/src/test/compile-fail/lex-bad-fp-base-8.rs b/src/test/compile-fail/lex-bad-fp-base-8.rs deleted file mode 100644 index 10e334ede01c2..0000000000000 --- a/src/test/compile-fail/lex-bad-fp-base-8.rs +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. 
-// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -fn main() { - let h = 0x8.0e+9; //~ ERROR: hexadecimal float literal is not supported -} diff --git a/src/test/compile-fail/lex-bad-fp-base-9.rs b/src/test/compile-fail/lex-bad-fp-base-9.rs deleted file mode 100644 index 3ea151cb9826a..0000000000000 --- a/src/test/compile-fail/lex-bad-fp-base-9.rs +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -fn main() { - let i = 0x9.0e-9; //~ ERROR: hexadecimal float literal is not supported -} diff --git a/src/test/compile-fail/lex-bad-fp-lit.rs b/src/test/compile-fail/lex-bad-fp-lit.rs deleted file mode 100644 index 5a5e9d7d8f238..0000000000000 --- a/src/test/compile-fail/lex-bad-fp-lit.rs +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -static f: float = - 1e+ //~ ERROR: scan_exponent: bad fp literal -; diff --git a/src/test/compile-fail/lex-bad-numeric-literals.rs b/src/test/compile-fail/lex-bad-numeric-literals.rs new file mode 100644 index 0000000000000..9a490be6a0169 --- /dev/null +++ b/src/test/compile-fail/lex-bad-numeric-literals.rs @@ -0,0 +1,35 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +fn main() { + 0o1.0; //~ ERROR: octal float literal is not supported + 0o2f32; //~ ERROR: octal float literal is not supported + 0o3.0f32; //~ ERROR: octal float literal is not supported + 0o4e4; //~ ERROR: octal float literal is not supported + 0o5.0e5; //~ ERROR: octal float literal is not supported + 0o6e6f32; //~ ERROR: octal float literal is not supported + 0o7.0e7f64; //~ ERROR: octal float literal is not supported + 0x8.0e+9; //~ ERROR: hexadecimal float literal is not supported + 0x9.0e-9; //~ ERROR: hexadecimal float literal is not supported + 0o; //~ ERROR: no valid digits + 1e+; //~ ERROR: expected at least one digit in exponent + 0x539.0; //~ ERROR: hexadecimal float literal is not supported + 99999999999999999999999999999999; //~ ERROR: int literal is too large + 99999999999999999999999999999999u32; //~ ERROR: int literal is too large + 0x; //~ ERROR: no valid digits + 0xu32; //~ ERROR: no valid digits + 0ou32; //~ ERROR: no valid digits + 0bu32; //~ ERROR: no valid digits + 0b; //~ ERROR: no valid digits + 0o123f64; //~ ERROR: octal float literal is not supported + 0o123.456; //~ ERROR: octal float literal is not supported + 0b101f64; //~ ERROR: binary float literal is not supported + 0b111.101; //~ ERROR: binary float literal is not supported +} diff --git a/src/test/compile-fail/lex-bad-fp-base-1.rs b/src/test/compile-fail/lex-bad-token.rs similarity index 85% rename from src/test/compile-fail/lex-bad-fp-base-1.rs rename to src/test/compile-fail/lex-bad-token.rs index 659cb5c837955..d28d9a20c6eed 100644 --- a/src/test/compile-fail/lex-bad-fp-base-1.rs +++ b/src/test/compile-fail/lex-bad-token.rs @@ -8,6 +8,4 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -fn main() { - let a = 0o1.0; //~ ERROR: octal float literal is not supported -} +● //~ ERROR: unknown start of token diff --git a/src/test/compile-fail/lex-hex-float-lit.rs b/src/test/compile-fail/lex-hex-float-lit.rs deleted file mode 100644 index 457c6126c44a5..0000000000000 --- a/src/test/compile-fail/lex-hex-float-lit.rs +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -static f: float = - 0x539.0 //~ ERROR: hexadecimal float literal is not supported -; diff --git a/src/test/compile-fail/lex-int-lit-too-large-2.rs b/src/test/compile-fail/lex-int-lit-too-large-2.rs deleted file mode 100644 index 39d1cba64b08b..0000000000000 --- a/src/test/compile-fail/lex-int-lit-too-large-2.rs +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -static i: int = - 99999999999999999999999999999999u32 //~ ERROR: int literal is too large -; diff --git a/src/test/compile-fail/lex-int-lit-too-large.rs b/src/test/compile-fail/lex-int-lit-too-large.rs deleted file mode 100644 index 6343be651fa59..0000000000000 --- a/src/test/compile-fail/lex-int-lit-too-large.rs +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -static i: int = - 99999999999999999999999999999999 //~ ERROR: int literal is too large -; diff --git a/src/test/compile-fail/lex-no-valid-digits-2.rs b/src/test/compile-fail/lex-no-valid-digits-2.rs deleted file mode 100644 index 549dbf5bc8c6c..0000000000000 --- a/src/test/compile-fail/lex-no-valid-digits-2.rs +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -static i: int = - 0xu32 //~ ERROR: no valid digits -; diff --git a/src/test/compile-fail/lex-no-valid-digits.rs b/src/test/compile-fail/lex-no-valid-digits.rs deleted file mode 100644 index 6a5b8e93f010a..0000000000000 --- a/src/test/compile-fail/lex-no-valid-digits.rs +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -static i: int = - 0x //~ ERROR: no valid digits -; diff --git a/src/test/compile-fail/lex-unknown-char-escape.rs b/src/test/compile-fail/lex-unknown-char-escape.rs deleted file mode 100644 index f2445c2b60eba..0000000000000 --- a/src/test/compile-fail/lex-unknown-char-escape.rs +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -static c: char = - '\●' //~ ERROR: unknown character escape -; diff --git a/src/test/compile-fail/lex-unknown-start-tok.rs b/src/test/compile-fail/lex-unknown-start-tok.rs deleted file mode 100644 index 1bb682303451b..0000000000000 --- a/src/test/compile-fail/lex-unknown-start-tok.rs +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -fn main() { - ● //~ ERROR: unknown start of token -} diff --git a/src/test/compile-fail/lex-unknown-str-escape.rs b/src/test/compile-fail/lex-unknown-str-escape.rs deleted file mode 100644 index 9a59c4227114b..0000000000000 --- a/src/test/compile-fail/lex-unknown-str-escape.rs +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -static s: &'static str = - "\●" //~ ERROR: unknown character escape -; diff --git a/src/test/compile-fail/lex-unterminated-char-const.rs b/src/test/compile-fail/lex-unterminated-char-const.rs deleted file mode 100644 index 551360ff9e095..0000000000000 --- a/src/test/compile-fail/lex-unterminated-char-const.rs +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -static c: char = - '● //~ ERROR: unterminated character constant -; diff --git a/src/test/compile-fail/no-oct-float-literal.rs b/src/test/compile-fail/no-oct-float-literal.rs deleted file mode 100644 index 511116b1c559c..0000000000000 --- a/src/test/compile-fail/no-oct-float-literal.rs +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -// error-pattern:octal float literal is not supported - -fn main() { - 0o123f64; - 0o123.456; - 0o123p4f; -}
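One editorial note for reading the compile-fail tests above: a `//~ ERROR: substring` comment asserts that the compiler reports an error containing that substring on the same line, and each caret in `//~^` moves the expected line up by one, which is how lex-bad-char-literals.rs stacks a second expectation under a single bad escape. A minimal invented example:

fn main() {
    let x: int = "not an int"; //~ ERROR: mismatched types
}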