diff --git a/contrib/postgres_fdw/postgres_fdw.c b/contrib/postgres_fdw/postgres_fdw.c
index 6da01e1d6f..e4d799cecd 100644
--- a/contrib/postgres_fdw/postgres_fdw.c
+++ b/contrib/postgres_fdw/postgres_fdw.c
@@ -2738,7 +2738,7 @@ postgresImportForeignSchema(ImportForeignSchemaStmt *stmt, Oid serverOid)
 		}
 
 		/* Append ORDER BY at the end of query to ensure output ordering */
-		appendStringInfo(&buf, " ORDER BY c.relname, a.attnum");
+		appendStringInfoString(&buf, " ORDER BY c.relname, a.attnum");
 
 		/* Fetch the data */
 		res = PQexec(conn, buf.data);
diff --git a/src/backend/access/rmgrdesc/gindesc.c b/src/backend/access/rmgrdesc/gindesc.c
index 324efa3769..09e928fb7c 100644
--- a/src/backend/access/rmgrdesc/gindesc.c
+++ b/src/backend/access/rmgrdesc/gindesc.c
@@ -113,7 +113,7 @@ gin_desc(StringInfo buf, XLogReaderState *record)
 					(ginxlogRecompressDataLeaf *) payload;
 
 				if (XLogRecHasBlockImage(record, 0))
-					appendStringInfo(buf, " (full page image)");
+					appendStringInfoString(buf, " (full page image)");
 				else
 					desc_recompress_leaf(buf, insertData);
 			}
@@ -147,7 +147,7 @@ gin_desc(StringInfo buf, XLogReaderState *record)
 				ginxlogVacuumDataLeafPage *xlrec = (ginxlogVacuumDataLeafPage *) rec;
 
 				if (XLogRecHasBlockImage(record, 0))
-					appendStringInfo(buf, " (full page image)");
+					appendStringInfoString(buf, " (full page image)");
 				else
 					desc_recompress_leaf(buf, &xlrec->data);
 			}
diff --git a/src/backend/access/rmgrdesc/spgdesc.c b/src/backend/access/rmgrdesc/spgdesc.c
index 6e426d7b8c..478f50c7a0 100644
--- a/src/backend/access/rmgrdesc/spgdesc.c
+++ b/src/backend/access/rmgrdesc/spgdesc.c
@@ -30,14 +30,14 @@ spg_desc(StringInfo buf, XLogReaderState *record)
 			{
 				spgxlogAddLeaf *xlrec = (spgxlogAddLeaf *) rec;
 
-				appendStringInfo(buf, "add leaf to page");
+				appendStringInfoString(buf, "add leaf to page");
 				appendStringInfo(buf, "; off %u; headoff %u; parentoff %u",
 								 xlrec->offnumLeaf, xlrec->offnumHeadLeaf,
 								 xlrec->offnumParent);
 				if (xlrec->newPage)
-					appendStringInfo(buf, " (newpage)");
+					appendStringInfoString(buf, " (newpage)");
 				if (xlrec->storesNulls)
-					appendStringInfo(buf, " (nulls)");
+					appendStringInfoString(buf, " (nulls)");
 			}
 			break;
 		case XLOG_SPGIST_MOVE_LEAFS:
@@ -63,9 +63,9 @@ spg_desc(StringInfo buf, XLogReaderState *record)
 				appendStringInfo(buf, "ndel %u; nins %u",
 								 xlrec->nDelete, xlrec->nInsert);
 				if (xlrec->innerIsParent)
-					appendStringInfo(buf, " (innerIsParent)");
+					appendStringInfoString(buf, " (innerIsParent)");
 				if (xlrec->isRootSplit)
-					appendStringInfo(buf, " (isRootSplit)");
+					appendStringInfoString(buf, " (isRootSplit)");
 			}
 			break;
 		case XLOG_SPGIST_VACUUM_LEAF:
diff --git a/src/backend/access/rmgrdesc/xactdesc.c b/src/backend/access/rmgrdesc/xactdesc.c
index 7b5f983050..e811c0a61e 100644
--- a/src/backend/access/rmgrdesc/xactdesc.c
+++ b/src/backend/access/rmgrdesc/xactdesc.c
@@ -232,7 +232,7 @@ xact_desc_commit(StringInfo buf, uint8 info, xl_xact_commit *xlrec, RepOriginId
 	}
 
 	if (XactCompletionForceSyncCommit(parsed.xinfo))
-		appendStringInfo(buf, "; sync");
+		appendStringInfoString(buf, "; sync");
 
 	if (parsed.xinfo & XACT_XINFO_HAS_ORIGIN)
 	{
diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index bb7cd9f775..1dd31b37ff 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -1097,7 +1097,7 @@ XLogInsertRecord(XLogRecData *rdata, XLogRecPtr fpw_lsn)
 
 		if (!debug_reader)
 		{
-			appendStringInfo(&buf, "error decoding record: out of memory");
+			appendStringInfoString(&buf, "error decoding record: out of memory");
 		}
 		else if (!DecodeXLogRecord(debug_reader, (XLogRecord *) recordBuf.data,
 								   &errormsg))
@@ -9528,7 +9528,7 @@ xlog_outrec(StringInfo buf, XLogReaderState *record)
 						 rnode.spcNode, rnode.dbNode, rnode.relNode,
 						 blk);
 		if (XLogRecHasBlockImage(record, block_id))
-			appendStringInfo(buf, " FPW");
+			appendStringInfoString(buf, " FPW");
 	}
 }
 #endif /* WAL_DEBUG */
diff --git a/src/backend/lib/pairingheap.c b/src/backend/lib/pairingheap.c
index 3d8a5ea561..7ca35452de 100644
--- a/src/backend/lib/pairingheap.c
+++ b/src/backend/lib/pairingheap.c
@@ -306,7 +306,7 @@ pairingheap_dump_recurse(StringInfo buf,
 
 		appendStringInfoSpaces(buf, depth * 4);
 		dumpfunc(node, buf, opaque);
-		appendStringInfoString(buf, "\n");
+		appendStringInfoChar(buf, '\n');
 		if (node->first_child)
 			pairingheap_dump_recurse(buf, node->first_child, dumpfunc, opaque, depth + 1, node);
 		prev_or_parent = node;
diff --git a/src/backend/utils/adt/ruleutils.c b/src/backend/utils/adt/ruleutils.c
index 2cd4b62701..5112cac901 100644
--- a/src/backend/utils/adt/ruleutils.c
+++ b/src/backend/utils/adt/ruleutils.c
@@ -5487,7 +5487,7 @@ get_insert_query_def(Query *query, deparse_context *context)
 	{
 		OnConflictExpr *confl = query->onConflict;
 
-		appendStringInfo(buf, " ON CONFLICT");
+		appendStringInfoString(buf, " ON CONFLICT");
 
 		if (confl->arbiterElems)
 		{
diff --git a/src/backend/utils/adt/xml.c b/src/backend/utils/adt/xml.c
index 99bc832ab8..31dfc4d2a7 100644
--- a/src/backend/utils/adt/xml.c
+++ b/src/backend/utils/adt/xml.c
@@ -2473,7 +2473,7 @@ query_to_xml_internal(const char *query, char *tablename,
 	{
 		xmldata_root_element_start(result, xmltn, xmlschema,
 								   targetns, top_level);
-		appendStringInfoString(result, "\n");
+		appendStringInfoChar(result, '\n');
 	}
 
 	if (xmlschema)
@@ -2637,7 +2637,7 @@ schema_to_xml_internal(Oid nspid, const char *xmlschema, bool nulls,
 	result = makeStringInfo();
 
 	xmldata_root_element_start(result, xmlsn, xmlschema, targetns, top_level);
-	appendStringInfoString(result, "\n");
+	appendStringInfoChar(result, '\n');
 
 	if (xmlschema)
 		appendStringInfo(result, "%s\n\n", xmlschema);
@@ -2815,7 +2815,7 @@ database_to_xml_internal(const char *xmlschema, bool nulls,
 	result = makeStringInfo();
 
 	xmldata_root_element_start(result, xmlcn, xmlschema, targetns, true);
-	appendStringInfoString(result, "\n");
+	appendStringInfoChar(result, '\n');
 
 	if (xmlschema)
 		appendStringInfo(result, "%s\n\n", xmlschema);
diff --git a/src/bin/pg_basebackup/pg_basebackup.c b/src/bin/pg_basebackup/pg_basebackup.c
index 5dd2887d12..536368020b 100644
--- a/src/bin/pg_basebackup/pg_basebackup.c
+++ b/src/bin/pg_basebackup/pg_basebackup.c
@@ -1516,7 +1516,7 @@ GenerateRecoveryConf(PGconn *conn)
 
 		/* Separate key-value pairs with spaces */
 		if (conninfo_buf.len != 0)
-			appendPQExpBufferStr(&conninfo_buf, " ");
+			appendPQExpBufferChar(&conninfo_buf, ' ');
 
 		/*
 		 * Write "keyword=value" pieces, the value string is escaped and/or
diff --git a/src/bin/pg_dump/pg_backup_archiver.c b/src/bin/pg_dump/pg_backup_archiver.c
index f9b564eee4..0d52babc4f 100644
--- a/src/bin/pg_dump/pg_backup_archiver.c
+++ b/src/bin/pg_dump/pg_backup_archiver.c
@@ -533,7 +533,7 @@ RestoreArchive(Archive *AHX)
 						 * search for hardcoded "DROP CONSTRAINT" instead.
 						 */
 						if (strcmp(te->desc, "DEFAULT") == 0)
-							appendPQExpBuffer(ftStmt, "%s", dropStmt);
+							appendPQExpBufferStr(ftStmt, dropStmt);
 						else
 						{
 							if (strcmp(te->desc, "CONSTRAINT") == 0 ||
diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c
index a72dfe93da..0a8129020b 100644
--- a/src/bin/pg_dump/pg_dump.c
+++ b/src/bin/pg_dump/pg_dump.c
@@ -1659,7 +1659,7 @@ dumpTableData_insert(Archive *fout, DumpOptions *dopt, void *dcontext)
 				/* append the list of column names if required */
 				if (dopt->column_inserts)
 				{
-					appendPQExpBufferStr(insertStmt, "(");
+					appendPQExpBufferChar(insertStmt, '(');
 					for (field = 0; field < nfields; field++)
 					{
 						if (field > 0)
@@ -11332,7 +11332,7 @@ dumpOpclass(Archive *fout, DumpOptions *dopt, OpclassInfo *opcinfo)
 		appendPQExpBufferStr(q, " FAMILY ");
 		if (strcmp(opcfamilynsp, opcinfo->dobj.namespace->dobj.name) != 0)
 			appendPQExpBuffer(q, "%s.", fmtId(opcfamilynsp));
-		appendPQExpBuffer(q, "%s", fmtId(opcfamilyname));
+		appendPQExpBufferStr(q, fmtId(opcfamilyname));
 	}
 	appendPQExpBufferStr(q, " AS\n ");
 
@@ -13844,7 +13844,7 @@ dumpTableSchema(Archive *fout, DumpOptions *dopt, TableInfo *tbinfo)
 				if (actual_atts == 0)
 					appendPQExpBufferStr(q, " (");
 				else
-					appendPQExpBufferStr(q, ",");
+					appendPQExpBufferChar(q, ',');
 				appendPQExpBufferStr(q, "\n ");
 				actual_atts++;
 
diff --git a/src/bin/psql/describe.c b/src/bin/psql/describe.c
index db568096dc..f63c7e90d3 100644
--- a/src/bin/psql/describe.c
+++ b/src/bin/psql/describe.c
@@ -1611,7 +1611,7 @@ describeOneTableDetails(const char *schemaname,
 			if (!PQgetisnull(res, i, 5))
 			{
 				if (tmpbuf.len > 0)
-					appendPQExpBufferStr(&tmpbuf, " ");
+					appendPQExpBufferChar(&tmpbuf, ' ');
 				appendPQExpBuffer(&tmpbuf, _("collate %s"),
 								  PQgetvalue(res, i, 5));
 			}
@@ -1619,7 +1619,7 @@ describeOneTableDetails(const char *schemaname,
 			if (strcmp(PQgetvalue(res, i, 3), "t") == 0)
 			{
 				if (tmpbuf.len > 0)
-					appendPQExpBufferStr(&tmpbuf, " ");
+					appendPQExpBufferChar(&tmpbuf, ' ');
 				appendPQExpBufferStr(&tmpbuf, _("not null"));
 			}
 
@@ -1628,7 +1628,7 @@ describeOneTableDetails(const char *schemaname,
 			if (strlen(PQgetvalue(res, i, 2)) != 0)
 			{
 				if (tmpbuf.len > 0)
-					appendPQExpBufferStr(&tmpbuf, " ");
+					appendPQExpBufferChar(&tmpbuf, ' ');
 				/* translator: default values of column definitions */
 				appendPQExpBuffer(&tmpbuf, _("default %s"),
 								  PQgetvalue(res, i, 2));
@@ -2440,7 +2440,7 @@ describeOneTableDetails(const char *schemaname,
 			printfPQExpBuffer(&buf, "%*s %s", sw, "",
 							  PQgetvalue(result, i, 0));
 			if (i < tuples - 1)
-				appendPQExpBufferStr(&buf, ",");
+				appendPQExpBufferChar(&buf, ',');
 
 			printTableAddFooter(&cont, buf.data);
 		}
diff --git a/src/bin/scripts/clusterdb.c b/src/bin/scripts/clusterdb.c
index 85087af795..8c0e7cfab2 100644
--- a/src/bin/scripts/clusterdb.c
+++ b/src/bin/scripts/clusterdb.c
@@ -201,7 +201,7 @@ cluster_one_database(const char *dbname, bool verbose, const char *table,
 		appendPQExpBufferStr(&sql, " VERBOSE");
 	if (table)
 		appendPQExpBuffer(&sql, " %s", table);
-	appendPQExpBufferStr(&sql, ";");
+	appendPQExpBufferChar(&sql, ';');
 
 	conn = connectDatabase(dbname, host, port, username, prompt_password,
 						   progname, false);
diff --git a/src/bin/scripts/createdb.c b/src/bin/scripts/createdb.c
index a958bb86f0..4d3fb22622 100644
--- a/src/bin/scripts/createdb.c
+++ b/src/bin/scripts/createdb.c
@@ -195,7 +195,7 @@ main(int argc, char *argv[])
 	if (lc_ctype)
 		appendPQExpBuffer(&sql, " LC_CTYPE '%s'", lc_ctype);
 
-	appendPQExpBufferStr(&sql, ";");
+	appendPQExpBufferChar(&sql, ';');
 
 	/* No point in trying to use postgres db when creating postgres db. */
 	if (maintenance_db == NULL && strcmp(dbname, "postgres") == 0)
@@ -222,7 +222,7 @@ main(int argc, char *argv[])
 	{
 		printfPQExpBuffer(&sql, "COMMENT ON DATABASE %s IS ", fmtId(dbname));
 		appendStringLiteralConn(&sql, comment, conn);
-		appendPQExpBufferStr(&sql, ";");
+		appendPQExpBufferChar(&sql, ';');
 
 		if (echo)
 			printf("%s\n", sql.data);
diff --git a/src/bin/scripts/createuser.c b/src/bin/scripts/createuser.c
index fba21a1c65..c8bcf0d0b2 100644
--- a/src/bin/scripts/createuser.c
+++ b/src/bin/scripts/createuser.c
@@ -321,7 +321,7 @@ main(int argc, char *argv[])
 				appendPQExpBuffer(&sql, "%s", fmtId(cell->val));
 		}
 	}
-	appendPQExpBufferStr(&sql, ";");
+	appendPQExpBufferChar(&sql, ';');
 
 	if (echo)
 		printf("%s\n", sql.data);
diff --git a/src/bin/scripts/reindexdb.c b/src/bin/scripts/reindexdb.c
index 941729da2e..80c78860be 100644
--- a/src/bin/scripts/reindexdb.c
+++ b/src/bin/scripts/reindexdb.c
@@ -295,7 +295,7 @@ reindex_one_database(const char *name, const char *dbname, const char *type,
 		appendPQExpBuffer(&sql, " SCHEMA %s", name);
 	else if (strcmp(type, "DATABASE") == 0)
 		appendPQExpBuffer(&sql, " DATABASE %s", fmtId(name));
-	appendPQExpBufferStr(&sql, ";");
+	appendPQExpBufferChar(&sql, ';');
 
 	conn = connectDatabase(dbname, host, port, username, prompt_password,
 						   progname, false);
diff --git a/src/bin/scripts/vacuumdb.c b/src/bin/scripts/vacuumdb.c
index f600b0514a..ca6d003683 100644
--- a/src/bin/scripts/vacuumdb.c
+++ b/src/bin/scripts/vacuumdb.c
@@ -392,7 +392,7 @@ vacuum_one_database(const char *dbname, vacuumingOptions *vacopts,
 	ntups = PQntuples(res);
 	for (i = 0; i < ntups; i++)
 	{
-		appendPQExpBuffer(&buf, "%s",
+		appendPQExpBufferStr(&buf,
 						  fmtQualifiedId(PQserverVersion(conn),
 										 PQgetvalue(res, i, 1),
 										 PQgetvalue(res, i, 0)));
@@ -643,7 +643,7 @@ prepare_vacuum_command(PQExpBuffer sql, PGconn *conn, vacuumingOptions *vacopts,
 			sep = comma;
 		}
 		if (sep != paren)
-			appendPQExpBufferStr(sql, ")");
+			appendPQExpBufferChar(sql, ')');
 	}
 	else
 	{
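
Not part of the patch: a minimal sketch of the substitution rule the hunks above apply, assuming the backend StringInfo API from lib/stringinfo.h (build_query and its arguments are made up for illustration). The frontend hunks follow the same pattern with appendPQExpBuffer/appendPQExpBufferStr/appendPQExpBufferChar.

#include "postgres.h"
#include "lib/stringinfo.h"

/* Hypothetical helper, for illustration only. */
static void
build_query(StringInfo buf, const char *relname)
{
	/* Format arguments present: appendStringInfo() is still the right call. */
	appendStringInfo(buf, "SELECT * FROM %s", relname);

	/* Constant string with no % conversions: skip the format-string pass. */
	appendStringInfoString(buf, " ORDER BY 1");

	/* Single character: appendStringInfoChar() is cheaper still. */
	appendStringInfoChar(buf, ';');
}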