Commits

Andrew Dunstan committed b4c745d

pgindent run

  • Participants
  • Parent commits ad2cee3

Comments (0)

Files changed (2)

File src/backend/utils/adt/json.c

 json_in(PG_FUNCTION_ARGS)
 {
 	char	   *json = PG_GETARG_CSTRING(0);
-	text       *result = cstring_to_text(json);
+	text	   *result = cstring_to_text(json);
 	JsonLexContext *lex;
 
 	/* validate it */
-	lex = makeJsonLexContext(result,false);
+	lex = makeJsonLexContext(result, false);
 	pg_parse_json(lex, NullSemAction);
 
 	/* Internal representation is the same as text, for now */
 	tok = lex_peek(lex);
 
 	/* parse by recursive descent */
-	switch(tok)
+	switch (tok)
 	{
 		case JSON_TOKEN_OBJECT_START:
 			parse_object(lex, sem);
 			parse_array(lex, sem);
 			break;
 		default:
-			parse_scalar(lex, sem); /* json can be a bare scalar */
+			parse_scalar(lex, sem);		/* json can be a bare scalar */
 	}
 
 	lex_expect(JSON_PARSE_END, lex, JSON_TOKEN_END);
 {
 	char	   *val = NULL;
 	json_scalar_action sfunc = sem->scalar;
-	char	   **valaddr;
+	char	  **valaddr;
 	JsonTokenType tok = lex_peek(lex);
 
-	valaddr  = sfunc == NULL ? NULL : &val;
+	valaddr = sfunc == NULL ? NULL : &val;
 
-	switch(tok)
+	switch (tok)
 	{
 		case JSON_TOKEN_TRUE:
 			lex_accept(lex, JSON_TOKEN_TRUE, valaddr);
 			lex_accept(lex, JSON_TOKEN_STRING, valaddr);
 			break;
 		default:
-			report_parse_error(JSON_PARSE_VALUE, lex);	
+			report_parse_error(JSON_PARSE_VALUE, lex);
 	}
 
 	if (sfunc != NULL)
-			(*sfunc) (sem->semstate, val, tok);
+		(*sfunc) (sem->semstate, val, tok);
 }
 
 static void
 	json_ofield_action ostart = sem->object_field_start;
 	json_ofield_action oend = sem->object_field_end;
 	bool		isnull;
-	char      **fnameaddr = NULL;
+	char	  **fnameaddr = NULL;
 	JsonTokenType tok;
 
 	if (ostart != NULL || oend != NULL)
 	lex_expect(JSON_PARSE_OBJECT_LABEL, lex, JSON_TOKEN_COLON);
 
 	tok = lex_peek(lex);
-	isnull = tok  == JSON_TOKEN_NULL;
+	isnull = tok == JSON_TOKEN_NULL;
 
 	if (ostart != NULL)
 		(*ostart) (sem->semstate, fname, isnull);
 	/* we know this will succeed, just clearing the token */
 	lex_expect(JSON_PARSE_OBJECT_START, lex, JSON_TOKEN_OBJECT_START);
 
-	tok  = lex_peek(lex);
-	switch(tok)
+	tok = lex_peek(lex);
+	switch (tok)
 	{
 		case JSON_TOKEN_STRING:
 			parse_object_field(lex, sem);
 	json_aelem_action astart = sem->array_element_start;
 	json_aelem_action aend = sem->array_element_end;
 	JsonTokenType tok = lex_peek(lex);
-	
+
 	bool		isnull;
 
 	isnull = tok == JSON_TOKEN_NULL;
 	if (astart != NULL)
 		(*astart) (sem->semstate, isnull);
 
-	switch(tok)
+	switch (tok)
 	{
 		case JSON_TOKEN_OBJECT_START:
 			parse_object(lex, sem);
 json_lex(JsonLexContext *lex)
 {
 	char	   *s;
-	int         len;
+	int			len;
+
 	/* Skip leading whitespace. */
 	s = lex->token_terminator;
 	len = s - lex->input;
 		lex->token_terminator = s;
 		lex->token_type = JSON_TOKEN_END;
 	}
-	else	
-		switch(*s)
+	else
+		switch (*s)
 		{
-			/* Single-character token, some kind of punctuation mark. */
+				/* Single-character token, some kind of punctuation mark. */
 			case '{':
 				lex->prev_token_terminator = lex->token_terminator;
 				lex->token_terminator = s + 1;
 				lex->token_type = JSON_TOKEN_NUMBER;
 				break;
 			default:
-			{
-				char	   *p;
-
-				/*
-				 * We're not dealing with a string, number, legal punctuation mark, or
-				 * end of string.  The only legal tokens we might find here are true,
-				 * false, and null, but for error reporting purposes we scan until we
-				 * see a non-alphanumeric character.  That way, we can report the
-				 * whole word as an unexpected token, rather than just some
-				 * unintuitive prefix thereof.
-				 */
-				for (p = s; JSON_ALPHANUMERIC_CHAR(*p) && p - s < lex->input_length - len; p++)
-					/* skip */ ;
-				
-				/*
-				 * We got some sort of unexpected punctuation or an otherwise
-				 * unexpected character, so just complain about that one character.
-				 */
-				if (p == s)
 				{
+					char	   *p;
+
+					/*
+					 * We're not dealing with a string, number, legal
+					 * punctuation mark, or end of string.	The only legal
+					 * tokens we might find here are true, false, and null,
+					 * but for error reporting purposes we scan until we see a
+					 * non-alphanumeric character.	That way, we can report
+					 * the whole word as an unexpected token, rather than just
+					 * some unintuitive prefix thereof.
+					 */
+					for (p = s; JSON_ALPHANUMERIC_CHAR(*p) && p - s < lex->input_length - len; p++)
+						 /* skip */ ;
+
+					/*
+					 * We got some sort of unexpected punctuation or an
+					 * otherwise unexpected character, so just complain about
+					 * that one character.
+					 */
+					if (p == s)
+					{
+						lex->prev_token_terminator = lex->token_terminator;
+						lex->token_terminator = s + 1;
+						report_invalid_token(lex);
+					}
+
+					/*
+					 * We've got a real alphanumeric token here.  If it
+					 * happens to be true, false, or null, all is well.  If
+					 * not, error out.
+					 */
 					lex->prev_token_terminator = lex->token_terminator;
-					lex->token_terminator = s + 1;
-					report_invalid_token(lex);
-				}
-				
-				/*
-				 * We've got a real alphanumeric token here.  If it happens to be
-				 * true, false, or null, all is well.  If not, error out.
-				 */
-				lex->prev_token_terminator = lex->token_terminator;
-				lex->token_terminator = p;
-				if (p - s == 4)
-				{
-					if (memcmp(s, "true", 4) == 0)
-						lex->token_type = JSON_TOKEN_TRUE;
-					else if (memcmp(s, "null", 4) == 0)
-						lex->token_type = JSON_TOKEN_NULL;
+					lex->token_terminator = p;
+					if (p - s == 4)
+					{
+						if (memcmp(s, "true", 4) == 0)
+							lex->token_type = JSON_TOKEN_TRUE;
+						else if (memcmp(s, "null", 4) == 0)
+							lex->token_type = JSON_TOKEN_NULL;
+						else
+							report_invalid_token(lex);
+					}
+					else if (p - s == 5 && memcmp(s, "false", 5) == 0)
+						lex->token_type = JSON_TOKEN_FALSE;
 					else
 						report_invalid_token(lex);
+
 				}
-				else if (p - s == 5 && memcmp(s, "false", 5) == 0)
-					lex->token_type = JSON_TOKEN_FALSE;
-				else
-					report_invalid_token(lex);
-				
-			}
-		} /* end of switch */
+		}						/* end of switch */
 }
 
 /*
 json_lex_string(JsonLexContext *lex)
 {
 	char	   *s;
-	int         len;
+	int			len;
+
 	if (lex->strval != NULL)
 		resetStringInfo(lex->strval);
 
 {
 	bool		error = false;
 	char	   *p;
-	int         len;
+	int			len;
 
 	len = s - lex->input;
 	/* Part (1): leading sign indicator. */
 	 * suffixing "..." if not ending at end of line.
 	 */
 	prefix = (context_start > line_start) ? "..." : "";
-	suffix = (lex->token_type != JSON_TOKEN_END && context_end  - lex->input < lex->input_length && *context_end != '\n' && *context_end != '\r') ? "..." : "";
+	suffix = (lex->token_type != JSON_TOKEN_END && context_end - lex->input < lex->input_length && *context_end != '\n' && *context_end != '\r') ? "..." : "";
 
 	return errcontext("JSON data, line %d: %s%s%s",
 					  line_number, prefix, ctxt, suffix);

File src/backend/utils/adt/jsonfuncs.c

 		if (!_state->use_json_as_text)
 			ereport(ERROR,
 					(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-					 errmsg("cannot call %s on a nested object", 
+					 errmsg("cannot call %s on a nested object",
 							_state->function_name)));
 		_state->save_json_start = _state->lex->token_start;
 	}
 	bool		found;
 	char		name[NAMEDATALEN];
 
-    /* 
-     * ignore field names >= NAMEDATALEN - they can't match a record field 
+	/*
+	 * ignore field names >= NAMEDATALEN - they can't match a record field
 	 * ignore nested fields.
 	 */
 	if (_state->lex->lex_level > 2 || strlen(fname) >= NAMEDATALEN)
 	hashentry->isnull = isnull;
 	if (_state->save_json_start != NULL)
 	{
-		int len = _state->lex->prev_token_terminator - _state->save_json_start;
-		char *val = palloc((len+1) * sizeof(char));
-		memcpy(val, _state->save_json_start,len);
+		int			len = _state->lex->prev_token_terminator - _state->save_json_start;
+		char	   *val = palloc((len + 1) * sizeof(char));
+
+		memcpy(val, _state->save_json_start, len);
 		val[len] = '\0';
 		hashentry->val = val;
 	}
 	else if (lex_level > 1 && !_state->use_json_as_text)
 		ereport(ERROR,
 				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-				 errmsg("cannot call populate_recordset with nested objects")));
+			  errmsg("cannot call populate_recordset with nested objects")));
 
 	/* set up a new hash for this entry */
 	memset(&ctl, 0, sizeof(ctl));
 {
 	PopulateRecordsetState _state = (PopulateRecordsetState) state;
 
-	if (_state->lex->lex_level != 0 && ! _state->use_json_as_text)
+	if (_state->lex->lex_level != 0 && !_state->use_json_as_text)
 		ereport(ERROR,
 				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-				 errmsg("cannot call populate_recordset with nested arrays")));
+			   errmsg("cannot call populate_recordset with nested arrays")));
 }
 
 static void
 		if (!_state->use_json_as_text)
 			ereport(ERROR,
 					(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-					 errmsg("cannot call populate_recordset on a nested object")));
+			   errmsg("cannot call populate_recordset on a nested object")));
 		_state->save_json_start = _state->lex->token_start;
 	}
 	else
 	bool		found;
 	char		name[NAMEDATALEN];
 
-	/* 
-	 * ignore field names >= NAMEDATALEN - they can't match a record field 
+	/*
+	 * ignore field names >= NAMEDATALEN - they can't match a record field
 	 * ignore nested fields.
 	 */
 	if (_state->lex->lex_level > 2 || strlen(fname) >= NAMEDATALEN)
 	hashentry->isnull = isnull;
 	if (_state->save_json_start != NULL)
 	{
-		int len = _state->lex->prev_token_terminator - _state->save_json_start;
-		char *val = palloc((len+1) * sizeof(char));
-		memcpy(val, _state->save_json_start,len);
+		int			len = _state->lex->prev_token_terminator - _state->save_json_start;
+		char	   *val = palloc((len + 1) * sizeof(char));
+
+		memcpy(val, _state->save_json_start, len);
 		val[len] = '\0';
 		hashentry->val = val;
 	}